//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count;  // hash_bits : 12; use_count : 20;
  u32 size;
  u32 tag;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;

  typedef StackTrace args_type;
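  // Matches this node against a candidate trace: compares the cached hash
  // bits first, then size and tag, and finally every frame.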
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
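  // Bytes needed for a node holding args.size frames; the trailing stack[1]
  // member already accounts for one frame, hence the "- 1".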
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
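  // Hashes the frame array with MurmurHash2, seeded with its byte size.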
  static u32 hash(const args_type &args) {
    MurMur2HashBuilder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    return H.get();
  }
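  // Only non-empty traces are worth storing.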
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
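  // Initializes a freshly allocated node. Only the hash bits of
  // hash_and_use_count are set, so the use counter starts at zero.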
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
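  // Reconstructs a StackTrace view over the frames stored in this node.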
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
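// The use counter occupies the low kUseCountBits of hash_and_use_count;
// letting it reach kMaxUseCount would carry into the hash bits, hence the
// CHECK below.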
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

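// A typical round trip, sketched here for orientation (the names "pcs" and
// "n" are hypothetical caller-side data, not part of this file):
//   u32 id = StackDepotPut(StackTrace(pcs, n));
//   StackTrace trace = StackDepotGet(id);  // Frames point into the depot.
// StackDepotPut returns the id of the interned trace, or 0 if the trace is
// invalid (empty).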
u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

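// Resolves an id produced by StackDepotPut back to the stored trace.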
StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

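// Lock/unlock every bucket of the depot so that callers (e.g. fork handlers
// in the sanitizer runtimes) can hold it in a consistent state.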
void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}

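// Snapshots the depot into a sorted id -> node array: walks every bucket
// chain (masking off the low lock bit of each bucket pointer) and sorts the
// collected pairs by id for later binary search.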
StackDepotReverseMap::StackDepotReverseMap() {
  map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    StackDepotNode *s = (StackDepotNode *)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
}

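// Binary search over the sorted map; returns an empty StackTrace when the id
// is not present.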
StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, nullptr};
  uptr idx =
      InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
  // InternalLowerBound may return map_.size() when all stored ids are
  // smaller, so the bound check must be inclusive.
  if (idx >= map_.size() || map_[idx].id != id)
    return StackTrace();
  return map_[idx].desc->load();
}

}  // namespace __sanitizer