//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on the memory layout of the shadow region and CompactRingBuffer
// that are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within a (2**kShadowBaseAlignment)
//   sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096, where N is in [0, 8), and is
//   aligned to twice its size. The value of N can be different for each
//   buffer.
//
// These constraints guarantee that, given the address A of any element of a
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
//   is the address of the next element of that ring buffer (with wrap-around).
// Because the buffer is aligned to twice its size, bit (N + 12) is zero for
// every in-buffer address and becomes one exactly when the incremented pointer
// steps one past the end, so clearing it wraps the pointer back to the start.
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
//   (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used by compiler instrumentation to update the ring
// buffer and to obtain the base address of the shadow region using only two
// inputs: the address of the current element of the ring buffer, and N (i.e.
// the size of the ring buffer). Since the range of N is very limited, both
// inputs are packed into a single thread-local word as
//     (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored
// in said thread-local word.
//
// Note the unusual way of aligning up the address of the shadow:
//     (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
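//
// A worked example of the arithmetic above (the concrete values are
// illustrative only, not part of the ABI): take N = 1, so the ring buffer is
// 8192 bytes and aligned to 16384; suppose it starts at 0x4000. For the last
// element, A = 0x5ff8:
//     A_next = (0x5ff8 + 8) & ~(1 << 13) = 0x6000 & ~0x2000 = 0x4000
// which wraps back to the first element. For an interior element, A = 0x4ff8:
//     A_next = (0x4ff8 + 8) & ~(1 << 13) = 0x5000
// since bit 13 is already clear. And with a hypothetical K = 32,
//     S = (0x5ff8 | 0xffffffff) + 1 = 0x100000000
// rounds up to the next 2**32 boundary.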

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"

#include "sanitizer_common/sanitizer_placement_new.h"

namespace __hwasan {

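// Returns the smallest supported ring buffer size (4096 * 2**shift bytes) that
// can hold flags()->stack_history_size records of sizeof(uptr) bytes each.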
static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  for (int shift = 1; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}

struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }

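  // Returns a fully initialized Thread for the calling thread, reusing a
  // zeroed slot from the free list when possible and bump-allocating a new
  // one otherwise. The new thread is registered in the live list.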
  Thread *CreateCurrentThread() {
    Thread *t;
    {
      SpinMutexLock l(&list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
        uptr start = (uptr)t - ring_buffer_size_;
        internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
      } else {
        t = AllocThread();
      }
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
    AddThreadStats(t);
    return t;
  }

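  // Returns the thread's pages (its ring buffer and Thread object) to the OS.
  // The address range stays reserved for reuse via the free list.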
  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t) {
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list in t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }

  void ReleaseThread(Thread *t) {
    RemoveThreadStats(t);
    t->Destroy();
    SpinMutexLock l(&list_mutex_);
    RemoveThreadFromLiveList(t);
    free_list_.push_back(t);
    DontNeedThread(t);
  }

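  // Recovers the owning Thread from an address anywhere inside its stack ring
  // buffer, using the allocation layout: blocks are ring_buffer_size_*2
  // aligned and place the Thread object right after the ring buffer.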
  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) {
    SpinMutexLock l(&list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  void AddThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

 private:
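  // Bump-allocates one ring_buffer_size_*2-aligned block and returns the
  // Thread located right after its ring buffer.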
  Thread *AllocThread() {
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK(free_space_ <= free_space_end_ && "out of thread memory");
    return t;
  }

  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  InternalMmapVector<Thread *> free_list_;
  InternalMmapVector<Thread *> live_list_;
  SpinMutex list_mutex_;

  ThreadStats stats_;
  SpinMutex stats_mutex_;
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
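// A sketch of the intended lifecycle (call sites are illustrative; the actual
// callers live elsewhere in the hwasan runtime):
//
//   InitThreadList(storage, size);         // once, during runtime startup
//   Thread *t = hwasanThreadList().CreateCurrentThread();  // on thread start
//   ...
//   hwasanThreadList().ReleaseThread(t);   // on thread exit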

}  // namespace __hwasan