//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of a mapping from arbitrary values to unique 32-bit
// identifiers.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_persistent_allocator.h"

namespace __sanitizer {

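// The Node template parameter must provide the small interface used below.
// A minimal sketch, reconstructed from the call sites in this header (the
// concrete node types, e.g. StackDepotNode, are defined alongside the
// concrete depots; the member names here mirror those uses):
//
//   struct ExampleNode {
//     ExampleNode *link;        // Next node in the bucket's list.
//     u32 id;                   // Assigned by Put(); 0 is never a valid id.
//     typedef ... args_type;    // Must provide is_valid() and hash().
//     typedef ... handle_type;  // Lightweight reference returned to callers.
//     static uptr storage_size(args_type args);  // Bytes needed to store args.
//     bool eq(u32 hash, const args_type &args);  // Does this node match args?
//     void store(args_type args, u32 hash);      // Fill in a fresh node.
//     args_type load();                          // Reconstruct stored args.
//     handle_type get_handle();
//   };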
template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
 public:
  typedef typename Node::args_type args_type;
  typedef typename Node::handle_type handle_type;
  // Maps a stack trace to a unique id; optionally reports whether a new
  // entry was inserted.
  handle_type Put(args_type args, bool *inserted = 0);
  // Retrieves a stored stack trace by its id.
  args_type Get(u32 id);

  StackDepotStats *GetStats() { return &stats; }

 private:
  static Node *find(Node *s, args_type args, u32 hash);
  static Node *lock(atomic_uintptr_t *p);
  static void unlock(atomic_uintptr_t *p, Node *s);

  static const int kTabSize = 1 << kTabSizeLog;  // Hash table size.
  static const int kPartBits = 8;
  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
  static const int kPartCount =
      1 << kPartBits;  // Number of subparts in the table.
  static const int kPartSize = kTabSize / kPartCount;
  static const int kMaxId = 1 << kPartShift;
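  // The id layout implied by the checks in Put(), most significant bits
  // first (a sketch; the top kReservedBits are left zero for the caller):
  //   [ kReservedBits: 0 | kPartBits: part index | kPartShift: sequence ]
  // Each part owns a disjoint id range, so the kPartCount generators in
  // seq[] can hand out ids without coordinating with each other.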

  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
  atomic_uint32_t seq[kPartCount];  // Unique id generators.

  StackDepotStats stats;

  friend class StackDepotReverseMap;
};

template <class Node, int kReservedBits, int kTabSizeLog>
Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
                                                             args_type args,
                                                             u32 hash) {
  // Searches the linked list s for the stack, returns the matching node
  // or 0 if it is not present.
  for (; s; s = s->link) {
    if (s->eq(hash, args)) {
      return s;
    }
  }
  return 0;
}

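// A note on the locking scheme, inferred from lock()/unlock() below: each
// bucket of tab[] doubles as a spinlock. Nodes are at least 2-byte aligned
// (unlock() DCHECKs this), so bit 0 of the head pointer is free to serve as
// the lock bit. lock() spins with a bounded proc_yield() busy-wait before
// falling back to internal_sched_yield(), and its acquire CAS pairs with
// the release store in unlock().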
template <class Node, int kReservedBits, int kTabSizeLog>
Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
    atomic_uintptr_t *p) {
  // Uses the pointer lsb as mutex.
  for (int i = 0;; i++) {
    uptr cmp = atomic_load(p, memory_order_relaxed);
    if ((cmp & 1) == 0 &&
        atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
      return (Node *)cmp;
    if (i < 10)
      proc_yield(10);
    else
      internal_sched_yield();
  }
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
    atomic_uintptr_t *p, Node *s) {
  DCHECK_EQ((uptr)s & 1, 0);
  atomic_store(p, (uptr)s, memory_order_release);
}

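// Put() uses double-checked locking: first an optimistic lock-free scan of
// the bucket's list (safe because nodes are only ever prepended and never
// freed), then, on a miss, it locks the bucket, re-scans in case another
// thread inserted the same value in the meantime, and only then allocates
// and publishes a new node.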
template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
                                                      bool *inserted) {
  if (inserted) *inserted = false;
  if (!args.is_valid()) return handle_type();
  uptr h = args.hash();
  atomic_uintptr_t *p = &tab[h % kTabSize];
  uptr v = atomic_load(p, memory_order_consume);
  Node *s = (Node *)(v & ~1);
  // First, try to find the existing stack.
  Node *node = find(s, args, h);
  if (node) return node->get_handle();
  // If failed, lock, retry and insert new.
  Node *s2 = lock(p);
  if (s2 != s) {
    node = find(s2, args, h);
    if (node) {
      unlock(p, s2);
      return node->get_handle();
    }
  }
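  // Compose the new id: the part is derived from the bucket index, the low
  // bits come from the part's private sequence counter, and the top
  // kReservedBits must stay zero (the CHECKs below enforce this layout).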
  uptr part = (h % kTabSize) / kPartSize;
  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
  stats.n_uniq_ids++;
  CHECK_LT(id, kMaxId);
  id |= part << kPartShift;
  CHECK_NE(id, 0);
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
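  // Allocate the node from the persistent arena (allocations there are
  // never freed, which is what makes the unlocked readers above safe) and
  // publish it at the head of the list; unlock() releases the bucket.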
  uptr memsz = Node::storage_size(args);
  s = (Node *)PersistentAlloc(memsz);
  stats.allocated += memsz;
  s->id = id;
  s->store(args, h);
  s->link = s2;
  unlock(p, s);
  if (inserted) *inserted = true;
  return s->get_handle();
}

template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
  if (id == 0) {
    return args_type();
  }
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  // High kPartBits contain part id, so we need to scan at most kPartSize lists.
  uptr part = id >> kPartShift;
  for (int i = 0; i != kPartSize; i++) {
    uptr idx = part * kPartSize + i;
    CHECK_LT(idx, kTabSize);
    atomic_uintptr_t *p = &tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    Node *s = (Node *)(v & ~1);
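    // A concurrent Put() may have this bucket locked; readers simply strip
    // the lock bit and walk the list, which is safe since nodes are only
    // ever prepended and never removed.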
    for (; s; s = s->link) {
      if (s->id == id) {
        return s->load();
      }
    }
  }
  return args_type();
}
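
// A usage sketch (illustrative only: ExampleNode and the template arguments
// are hypothetical, and how the 32-bit id is read out of the handle is
// node-specific; concrete depots pick their own node type and sizes):
//
//   static StackDepotBase<ExampleNode, /*kReservedBits=*/0,
//                         /*kTabSizeLog=*/20> depot;
//   bool inserted = false;
//   ExampleNode::handle_type h = depot.Put(args, &inserted);
//   // ... later, with the id recovered from the handle:
//   ExampleNode::args_type copy = depot.Get(id);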

}  // namespace __sanitizer
#endif  // SANITIZER_STACKDEPOTBASE_H