//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KB.
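// On 64-bit targets the batch is exactly 8 KB: the three header words plus
// kSize == 1021 pointer slots add up to 1024 pointer-sized fields (and
// exactly 4 KB on 32-bit targets).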

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
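//
// A minimal sketch of a conforming callback; the names below are
// illustrative only and not part of this header. A real tool (e.g. ASan)
// routes these through its own allocator and poisons/unpoisons the
// quarantined memory in Recycle():
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr) { ReturnToAllocator(ptr); }     // hypothetical
//     void *Allocate(uptr size) { return BatchAlloc(size); }  // hypothetical
//     void Deallocate(void *ptr) { BatchFree(ptr); }          // hypothetical
//   };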
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release); // 90% of max size.
    max_cache_size_ = cache_size;
  }

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }

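  // Puts a chunk into the per-thread cache; once the cache grows past
  // max_cache_size_, its contents are drained into the global cache.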
  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

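  // Transfers the whole per-thread cache into the global cache under
  // cache_mutex_, then starts recycling if the global cache is over its
  // limit. TryLock() keeps at most one thread busy recycling; the others
  // simply skip and let it do the work.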
  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

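  // Runs with recycle_mutex_ held (taken in Drain). Detaches batches from
  // the global cache until it drops to min_size_ (90% of the limit); the
  // 10% hysteresis avoids re-entering Recycle on every subsequent Put.
  // The detached batches are recycled outside cache_mutex_ so that other
  // threads can keep draining in parallel.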
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

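  // Recycles every chunk in the detached batches, prefetching kPrefetch
  // entries ahead of the one being processed to hide cache misses on the
  // cold quarantined memory. The warm-up loop may prefetch uninitialized
  // slots when count < kPrefetch, which is harmless for a prefetch.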
  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
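
// A minimal usage sketch under the assumptions above (ExampleCallback, Node
// and the instances are illustrative, not part of this header):
//
//   static Quarantine<ExampleCallback, Node> quarantine(LINKER_INITIALIZED);
//   static THREADLOCAL Quarantine<ExampleCallback, Node>::Cache cache(
//       LINKER_INITIALIZED);
//
//   quarantine.Init(/*size=*/1 << 24, /*cache_size=*/1 << 18);
//   // On deallocation, put the chunk into quarantine instead of freeing it:
//   quarantine.Put(&cache, ExampleCallback(), node, chunk_size);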

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

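  // Moves all batches from another cache (typically a per-thread cache
  // being drained into the global one) into this cache and zeroes the
  // source's size.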
  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

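  // size_ is only mutated by the cache's owner: the owning thread for a
  // per-thread cache, or a thread holding cache_mutex_ for the global one.
  // The plain load-modify-store below is therefore sufficient; the relaxed
  // atomics merely make the unsynchronized readers of Size() well-defined.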
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H