//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

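// A single element of the depot's hash table, and the node type with which
// StackDepotBase is instantiated below. It supplies the hash(), eq(),
// store(), load() and storage_size() operations the base template expects;
// the captured frames are stored inline, after the header fields.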
struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count;  // hash_bits : 12; use_count : 20;
  u32 size;
  u32 tag;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
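  // With kUseCountBits == 20, the 32-bit word therefore packs as
  //   [ 12 hash bits | 20 use-count bits ]:
  //   kUseCountMask == 0x000fffff, kHashMask == 0xfff00000.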

  typedef StackTrace args_type;
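  // Compares this node against a candidate (hash, trace) pair. Only the hash
  // bits of hash_and_use_count participate: the low kUseCountBits hold a
  // mutable counter that must not affect equality.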
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
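  // stack[1] is an old-style flexible array member: each node is allocated
  // with room for all args.size frames appended, so the one declared element
  // is subtracted back out of the size.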
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  static u32 hash(const args_type &args) {
    // MurmurHash2, by Austin Appleby.
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) {
      u32 k = args.trace[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
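// "unsafe" because the counter shares one atomic word with the hash bits;
// the CHECK below ensures the increment cannot overflow into them.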
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}
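
// A minimal usage sketch (hypothetical caller code, not part of this file):
// the depot deduplicates traces and hands back a compact u32 id (0 meaning
// "not stored") that can be resolved back into the full trace later.
//
//   uptr pcs[3] = {pc0, pc1, pc2};  // pc0..pc2: previously captured PCs
//   u32 id = StackDepotPut(StackTrace(pcs, 3, 0));
//   ...
//   StackTrace trace = StackDepotGet(id);  // same frames as stored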

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}
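
// LockAll/UnlockAll grab and release every bucket lock, giving the caller a
// consistent view of the whole table; the sanitizer runtimes use this kind
// of global lock around fork(), for example.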

bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}

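// Builds the id -> node map eagerly: one pass over every bucket chain in the
// depot, followed by an O(n log n) sort by id, so that Get() can use binary
// search instead of re-walking the table.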
StackDepotReverseMap::StackDepotReverseMap()
    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
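    // The low bit of a bucket pointer serves as StackDepotBase's per-bucket
    // lock flag; mask it off to get the head of the chain.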
    StackDepotNode *s = (StackDepotNode*)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}

StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, 0};
  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
                                  IdDescPair::IdComparator);
  if (idx > map_.size())
    return StackTrace();
  return map_[idx].desc->load();
}
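
// A minimal usage sketch (hypothetical caller code, not part of this file):
// build the reverse map once, then resolve many ids without re-walking the
// depot's hash table.
//
//   StackDepotReverseMap reverse_map;    // snapshots the current depot
//   StackTrace t = reverse_map.Get(id);  // empty trace if id is unknown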

}  // namespace __sanitizer