//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mutex.h"

namespace scudo {

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
      : BaseAddress(BaseAddress), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  // Releases [From, To) range of pages back to OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(BaseAddress, From, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  uptr BaseAddress = 0;
  MapPlatformData *Data = nullptr;
};
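
// Illustrative sketch (not part of this header): a release pass wraps a
// ReleaseRecorder around the pages it returns to the OS and reads the totals
// afterwards. RegionBase and PageSize below are assumed values.
//
//   ReleaseRecorder Recorder(/*BaseAddress=*/RegionBase);
//   Recorder.releasePageRangeToOS(/*From=*/0, /*To=*/4 * PageSize);
//   // Recorder.getReleasedRangesCount() == 1
//   // Recorder.getReleasedBytes() == 4 * PageSize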

// A packed array of Counters. Each counter occupies 2^N bits, enough to store
// the counter's MaxValue. The constructor will try to use a static buffer
// first, and if that fails (the buffer is too small or already locked), will
// allocate the required Buffer via map(). The caller is expected to check
// whether the initialization was successful by checking isAllocated(). For
// performance's sake, none of the accessors check the validity of the
// arguments: it is assumed that Index is always in the [0, N) range and that
// the value is never incremented past MaxValue.
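// For instance (illustrative numbers): with MaxValue = 7 a counter needs
// getMostSignificantSetBitIndex(7) + 1 = 3 bits, rounded up to 4, so on a
// 64-bit platform one uptr word packs 64 / 4 = 16 counters.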
class PackedCounterArray {
public:
  PackedCounterArray(uptr NumberOfRegions, uptr CountersPerRegion,
                     uptr MaxValue)
      : Regions(NumberOfRegions), NumCounters(CountersPerRegion) {
    CHECK_GT(Regions, 0);
    CHECK_GT(NumCounters, 0);
    CHECK_GT(MaxValue, 0);
    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
    // Rounding the counter storage size up to a power of two allows for using
    // bit shifts to compute a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    CHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    CHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    SizePerRegion =
        roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
    if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
        Mutex.tryLock()) {
      Buffer = &StaticBuffer[0];
      memset(Buffer, 0, BufferSize);
    } else {
      Buffer = reinterpret_cast<uptr *>(
          map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
    }
  }

  ~PackedCounterArray() {
    if (!isAllocated())
      return;
    if (Buffer == &StaticBuffer[0])
      Mutex.unlock();
    else
      unmap(reinterpret_cast<void *>(Buffer), BufferSize);
  }

  bool isAllocated() const { return !!Buffer; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                              << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  uptr getBufferSize() const { return BufferSize; }

  static const uptr StaticBufferCount = 2048U;

private:
  const uptr Regions;
  const uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferSize;
  uptr *Buffer;

  static HybridMutex Mutex;
  static uptr StaticBuffer[StaticBufferCount];
};
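
// Illustrative sketch (assumed geometry: one region, 8 counters per region,
// values saturating at 3):
//
//   PackedCounterArray Counters(/*NumberOfRegions=*/1,
//                               /*CountersPerRegion=*/8, /*MaxValue=*/3);
//   if (Counters.isAllocated()) {
//     Counters.incRange(/*Region=*/0, /*From=*/0, /*To=*/2);
//     Counters.inc(/*Region=*/0, /*I=*/5);
//     // Counters.get(0, 1) == 1 and Counters.get(0, 5) == 1 at this point.
//   }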

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Freed) {
    if (Freed) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                     (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT *const Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
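
// Illustrative sketch (assuming a ReleaseRecorder named Recorder over the
// target pages): the tracker coalesces consecutive freed pages into a single
// release call, so a freed/freed/busy/freed pattern yields two ranges.
//
//   FreePagesRangeTracker<ReleaseRecorder> Tracker(&Recorder);
//   Tracker.processNextPage(true);   // page 0 freed, opens a range
//   Tracker.processNextPage(true);   // page 1 freed, extends it
//   Tracker.processNextPage(false);  // page 2 busy, releases pages [0, 2)
//   Tracker.processNextPage(true);   // page 3 freed, opens a new range
//   Tracker.finish();                // releases pages [3, 4)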

template <class TransferBatchT, class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
                      uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
                      ReleaseRecorderT *Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = getPageSizeCached();

  // Figure out the number of chunks per page and whether we can take a fast
  // path (the number of chunks per page is the same for all pages).
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  if (BlockSize <= PageSize) {
    if (PageSize % BlockSize == 0) {
      // Same number of chunks per page, no cross-overs.
      FullPagesBlockCountMax = PageSize / BlockSize;
      SameBlockCountPerPage = true;
    } else if (BlockSize % (PageSize % BlockSize) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 1;
      SameBlockCountPerPage = true;
    } else {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, and pages may contain different
      // numbers of chunks.
      FullPagesBlockCountMax = PageSize / BlockSize + 2;
      SameBlockCountPerPage = false;
    }
  } else {
    if (BlockSize % PageSize == 0) {
      // One chunk covers multiple pages, no cross-overs.
      FullPagesBlockCountMax = 1;
      SameBlockCountPerPage = true;
    } else {
      // One chunk covers multiple pages, and some chunks cross page
      // boundaries. Some pages contain one chunk, some contain two.
      FullPagesBlockCountMax = 2;
      SameBlockCountPerPage = false;
    }
  }
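
  // Illustrative numbers, assuming a 4096-byte page:
  //   BlockSize == 64: 4096 % 64 == 0, so exactly 64 blocks per page
  //     (fast path).
  //   BlockSize == 80: 4096 % 80 == 16 and 80 % 16 == 0, so every page is
  //     affected by exactly 52 blocks (fast path).
  //   BlockSize == 96: 4096 % 96 == 64 and 96 % 64 != 0, so the per-page
  //     count varies (slow path, FullPagesBlockCountMax == 44).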

  const uptr PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
  PackedCounterArray Counters(NumberOfRegions, PagesCount,
                              FullPagesBlockCountMax);
  if (!Counters.isAllocated())
    return;

  const uptr PageSizeLog = getLog2(PageSize);
  const uptr RoundedSize = NumberOfRegions * (PagesCount << PageSizeLog);

  // Iterate over free chunks and count how many free chunks affect each
  // allocated page.
  if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
    // Each chunk affects one page only.
    for (const auto &It : FreeList) {
      // If dealing with a TransferBatch, the first pointer of the batch will
      // point to the batch itself; we do not want to mark it for release as
      // the batch is in use, so skip the first entry.
      const bool IsTransferBatch =
          (It.getCount() != 0) &&
          (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
      for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
        const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
        // This takes care of P < Base and P >= Base + RoundedSize.
        if (P < RoundedSize) {
          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
          const uptr PInRegion = P - RegionIndex * RegionSize;
          Counters.inc(RegionIndex, PInRegion >> PageSizeLog);
        }
      }
    }
  } else {
    // In all other cases chunks might affect more than one page.
    for (const auto &It : FreeList) {
      // See the TransferBatch comment above.
      const bool IsTransferBatch =
          (It.getCount() != 0) &&
          (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
      for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
        const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
        // This takes care of P < Base and P >= Base + RoundedSize.
        if (P < RoundedSize) {
          const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
          const uptr PInRegion = P - RegionIndex * RegionSize;
          Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
                            (PInRegion + BlockSize - 1) >> PageSizeLog);
        }
      }
    }
  }

  // Iterate over pages, detecting ranges of pages whose chunk counters equal
  // the expected number of chunks for that page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++)
        RangeTracker.processNextPage(Counters.get(I, J) ==
                                     FullPagesBlockCountMax);
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks affect
    // each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, then by the middle portion size (the portion of the page covered
    // by chunks other than the first and the last one), and then by the last
    // chunk size, adding up the number of chunks on the current page and
    // checking at every step whether a page boundary was crossed.
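    // For instance (illustrative, 4096-byte pages and BlockSize == 96):
    // Pn == 42 and Pnc == 4032, so from a boundary at 0 the walk advances to
    // 4032, sees that one more block starts before 4096, and counts 43 chunks
    // for the first page, carrying the boundary at 4128 into the next page.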
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;

        RangeTracker.processNextPage(Counters.get(I, J) == BlocksPerPage);
      }
    }
  }
  RangeTracker.finish();
}
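
// Illustrative sketch of a caller (hypothetical names; a real primary
// allocator supplies its own batch type, region geometry, and skip policy):
//
//   ReleaseRecorder Recorder(RegionBase);
//   releaseFreeMemoryToOS(FreeList, RegionBase, RegionSize,
//                         /*NumberOfRegions=*/1, BlockSize, &Recorder,
//                         [](uptr /*RegionIndex*/) { return false; });
//   const uptr Released = Recorder.getReleasedBytes();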

} // namespace scudo

#endif // SCUDO_RELEASE_H_