• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Implementation of a mapping from arbitrary values to unique 32-bit
10 // identifiers.
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef SANITIZER_STACKDEPOTBASE_H
14 #define SANITIZER_STACKDEPOTBASE_H
15 
16 #include <stdio.h>
17 
18 #include "sanitizer_atomic.h"
19 #include "sanitizer_internal_defs.h"
20 #include "sanitizer_mutex.h"
21 #include "sanitizer_persistent_allocator.h"
22 
23 namespace __sanitizer {
24 
// Generic depot: deduplicating map from arbitrary values (stack traces) to
// compact u32 ids. Node supplies storage/hash/equality; kReservedBits high
// bits of the id are left free for the caller; the hash table has
// 1 << kTabSizeLog buckets.
template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
 public:
  typedef typename Node::args_type args_type;
  typedef typename Node::handle_type handle_type;
  // Maps stack trace to an unique id.
  handle_type Put(args_type args, bool *inserted = nullptr);
  // Retrieves a stored stack trace by the id.
  args_type Get(u32 id);

  // Accounting (unique id count, bytes allocated) maintained by Put().
  StackDepotStats *GetStats() { return &stats; }

  // Acquire/release every per-bucket lock (e.g. around fork()).
  void LockAll();
  void UnlockAll();
  void PrintAll();

 private:
  // Linear search of the chain rooted at s for a matching entry.
  static Node *find(Node *s, args_type args, u32 hash);
  // Per-bucket spinlock: the lsb of the bucket word is the lock bit;
  // lock() returns the node list head with the bit stripped.
  static Node *lock(atomic_uintptr_t *p);
  static void unlock(atomic_uintptr_t *p, Node *s);

  static const int kTabSize = 1 << kTabSizeLog;  // Hash table size.
  static const int kPartBits = 8;
  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
  static const int kPartCount =
      1 << kPartBits;  // Number of subparts in the table.
  static const int kPartSize = kTabSize / kPartCount;
  static const int kMaxId = 1 << kPartShift;

  // Id layout (low to high): kPartShift sequence bits, kPartBits part index,
  // kReservedBits left untouched for the caller.
  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
  atomic_uint32_t seq[kPartCount];  // Unique id generators.

  StackDepotStats stats;

  friend class StackDepotReverseMap;
};
61 
62 template <class Node, int kReservedBits, int kTabSizeLog>
find(Node * s,args_type args,u32 hash)63 Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
64                                                              args_type args,
65                                                              u32 hash) {
66   // Searches linked list s for the stack, returns its id.
67   for (; s; s = s->link) {
68     if (s->eq(hash, args)) {
69       return s;
70     }
71   }
72   return nullptr;
73 }
74 
template <class Node, int kReservedBits, int kTabSizeLog>
Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
    atomic_uintptr_t *p) {
  // Uses the pointer lsb as mutex.
  // Spins until the lock bit is observed clear and the CAS setting it
  // succeeds; returns the list head (lock bit known to be 0 in cmp).
  // The acquire ordering pairs with the release store in unlock().
  for (int i = 0;; i++) {
    uptr cmp = atomic_load(p, memory_order_relaxed);
    if ((cmp & 1) == 0 &&
        atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
      return (Node *)cmp;
    // Backoff: brief processor-level yield first, then yield the thread.
    if (i < 10)
      proc_yield(10);
    else
      internal_sched_yield();
  }
}
90 
91 template <class Node, int kReservedBits, int kTabSizeLog>
unlock(atomic_uintptr_t * p,Node * s)92 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
93     atomic_uintptr_t *p, Node *s) {
94   DCHECK_EQ((uptr)s & 1, 0);
95   atomic_store(p, (uptr)s, memory_order_release);
96 }
97 
// Inserts |args| (or finds its existing entry) and returns a handle to it.
// Fast path is lock-free: scan the bucket chain without taking the lock;
// only on a miss is the per-bucket spinlock taken and the scan retried
// before allocating a new node. *inserted (if non-null) reports whether a
// new entry was created.
template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
                                                      bool *inserted) {
  if (inserted) *inserted = false;
  if (!Node::is_valid(args)) return handle_type();
  uptr h = Node::hash(args);
  atomic_uintptr_t *p = &tab[h % kTabSize];
  uptr v = atomic_load(p, memory_order_consume);
  Node *s = (Node *)(v & ~1);  // Strip the lock bit to get the list head.
  // First, try to find the existing stack.
  Node *node = find(s, args, h);
  if (node) return node->get_handle();
  // If failed, lock, retry and insert new.
  Node *s2 = lock(p);
  if (s2 != s) {
    // The chain changed since the unlocked scan; another thread may have
    // inserted the same stack meanwhile.
    node = find(s2, args, h);
    if (node) {
      unlock(p, s2);
      return node->get_handle();
    }
  }
  // Allocate a fresh id: low bits from the part's sequence counter, high
  // bits (below the reserved ones) encode the part index.
  uptr part = (h % kTabSize) / kPartSize;
  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
  stats.n_uniq_ids++;
  CHECK_LT(id, kMaxId);
  id |= part << kPartShift;
  CHECK_NE(id, 0);
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  uptr memsz = Node::storage_size(args);
  s = (Node *)PersistentAlloc(memsz);
  stats.allocated += memsz;
  s->id = id;
  s->store(args, h);
  // Link at the head; unlock() publishes the new head with a release store,
  // making the fully-initialized node visible to lock-free readers.
  s->link = s2;
  unlock(p, s);
  if (inserted) *inserted = true;
  return s->get_handle();
}
137 
138 template <class Node, int kReservedBits, int kTabSizeLog>
139 typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
Get(u32 id)140 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
141   if (id == 0) {
142     return args_type();
143   }
144   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
145   // High kPartBits contain part id, so we need to scan at most kPartSize lists.
146   uptr part = id >> kPartShift;
147   for (int i = 0; i != kPartSize; i++) {
148     uptr idx = part * kPartSize + i;
149     CHECK_LT(idx, kTabSize);
150     atomic_uintptr_t *p = &tab[idx];
151     uptr v = atomic_load(p, memory_order_consume);
152     Node *s = (Node *)(v & ~1);
153     for (; s; s = s->link) {
154       if (s->id == id) {
155         return s->load();
156       }
157     }
158   }
159   return args_type();
160 }
161 
162 template <class Node, int kReservedBits, int kTabSizeLog>
LockAll()163 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
164   for (int i = 0; i < kTabSize; ++i) {
165     lock(&tab[i]);
166   }
167 }
168 
169 template <class Node, int kReservedBits, int kTabSizeLog>
UnlockAll()170 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
171   for (int i = 0; i < kTabSize; ++i) {
172     atomic_uintptr_t *p = &tab[i];
173     uptr s = atomic_load(p, memory_order_relaxed);
174     unlock(p, (Node *)(s & ~1UL));
175   }
176 }
177 
178 template <class Node, int kReservedBits, int kTabSizeLog>
PrintAll()179 void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
180   for (int i = 0; i < kTabSize; ++i) {
181     atomic_uintptr_t *p = &tab[i];
182     lock(p);
183     uptr v = atomic_load(p, memory_order_relaxed);
184     Node *s = (Node *)(v & ~1UL);
185     for (; s; s = s->link) {
186       Printf("Stack for id %u:\n", s->id);
187       s->load().Print();
188     }
189     unlock(p, s);
190   }
191 }
192 
193 } // namespace __sanitizer
194 
195 #endif // SANITIZER_STACKDEPOTBASE_H
196