//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// The deadlock detector maintains a directed graph of lock acquisitions.
// When a lock event happens, the detector checks if the locks already held by
// the current thread are reachable from the newly acquired lock.
//
// The detector can handle only a fixed number of simultaneously live locks
// (a lock is alive if it has been locked at least once and has not been
// destroyed). When the maximal number of locks is reached, the entire graph
// is flushed and a new lock epoch is started. The node ids from the old
// epochs cannot be used with any of the detector methods except for
// nodeBelongsToCurrentEpoch().
//
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DEADLOCK_DETECTOR_H
#define SANITIZER_DEADLOCK_DETECTOR_H

#include "sanitizer_common.h"
#include "sanitizer_bvgraph.h"

namespace __sanitizer {

// Thread-local state for DeadlockDetector.
// It contains the locks currently held by the owning thread.
template <class BV>
class DeadlockDetectorTLS {
 public:
  // No CTOR.
  void clear() {
    bv_.clear();
    epoch_ = 0;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  bool empty() const { return bv_.empty(); }

  void ensureCurrentEpoch(uptr current_epoch) {
    if (epoch_ == current_epoch) return;
    bv_.clear();
    epoch_ = current_epoch;
  }

  uptr getEpoch() const { return epoch_; }

  // Returns true if this is the first (non-recursive) acquisition of this
  // lock.
  bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
    // Printf("addLock: %zx %zx stk %u\n", lock_id, current_epoch, stk);
    CHECK_EQ(epoch_, current_epoch);
    if (!bv_.setBit(lock_id)) {
      // The lock is already held by this thread, so this is a recursive
      // acquisition.
      CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
      recursive_locks[n_recursive_locks++] = lock_id;
      return false;
    }
    CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
    // lock_id < BV::kSize, can cast to a smaller int.
    u32 lock_id_short = static_cast<u32>(lock_id);
    LockWithContext l = {lock_id_short, stk};
    all_locks_with_contexts_[n_all_locks_++] = l;
    return true;
  }

  void removeLock(uptr lock_id) {
    if (n_recursive_locks) {
      for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
        if (recursive_locks[i] == lock_id) {
          n_recursive_locks--;
          Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
          return;
        }
      }
    }
    // Printf("remLock: %zx %zx\n", lock_id, epoch_);
    CHECK(bv_.clearBit(lock_id));
    if (n_all_locks_) {
      for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
        if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
          Swap(all_locks_with_contexts_[i],
               all_locks_with_contexts_[n_all_locks_ - 1]);
          n_all_locks_--;
          break;
        }
      }
    }
  }

  u32 findLockContext(uptr lock_id) {
    for (uptr i = 0; i < n_all_locks_; i++)
      if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
        return all_locks_with_contexts_[i].stk;
    return 0;
  }

  const BV &getLocks(uptr current_epoch) const {
    CHECK_EQ(epoch_, current_epoch);
    return bv_;
  }

  uptr getNumLocks() const { return n_all_locks_; }
  uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }

 private:
  BV bv_;
  uptr epoch_;
  uptr recursive_locks[64];
  uptr n_recursive_locks;
  struct LockWithContext {
    u32 lock;
    u32 stk;
  };
  LockWithContext all_locks_with_contexts_[64];
  uptr n_all_locks_;
};

// DeadlockDetector.
// For deadlock detection to work we need one global DeadlockDetector object
// and one DeadlockDetectorTLS object per thread.
// This class is not thread-safe; all concurrent accesses should be guarded
// by an external lock.
// Most of the methods of this class are not thread-safe (i.e. should
// be protected by an external lock) unless explicitly stated otherwise.
template <class BV>
class DeadlockDetector {
 public:
  typedef BV BitVector;

  uptr size() const { return g_.size(); }

  // No CTOR.
  void clear() {
    current_epoch_ = 0;
    available_nodes_.clear();
    recycled_nodes_.clear();
    g_.clear();
    n_edges_ = 0;
  }

  // Allocate a new deadlock detector node.
  // If we are out of available nodes, first try to recycle some.
  // If there is nothing to recycle, flush the graph and increment the epoch.
  // Associate 'data' (an opaque user's object) with the new node.
  uptr newNode(uptr data) {
    if (!available_nodes_.empty())
      return getAvailableNode(data);
    if (!recycled_nodes_.empty()) {
      // Printf("recycling: n_edges_ %zd\n", n_edges_);
      for (sptr i = n_edges_ - 1; i >= 0; i--) {
        if (recycled_nodes_.getBit(edges_[i].from) ||
            recycled_nodes_.getBit(edges_[i].to)) {
          Swap(edges_[i], edges_[n_edges_ - 1]);
          n_edges_--;
        }
      }
      CHECK(available_nodes_.empty());
      // removeEdgesFrom was called in removeNode.
      g_.removeEdgesTo(recycled_nodes_);
      available_nodes_.setUnion(recycled_nodes_);
      recycled_nodes_.clear();
      return getAvailableNode(data);
    }
    // We are out of vacant nodes. Flush and increment the current_epoch_.
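    // Node ids are index + current_epoch_, and current_epoch_ is always a
    // multiple of size(), so advancing it by size() makes every node id from
    // the previous epoch fail nodeBelongsToCurrentEpoch().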
    current_epoch_ += size();
    recycled_nodes_.clear();
    available_nodes_.setAll();
    g_.clear();
    return getAvailableNode(data);
  }

  // Get data associated with the node created by newNode().
  uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }

  bool nodeBelongsToCurrentEpoch(uptr node) {
    return node && (node / size() * size()) == current_epoch_;
  }

  void removeNode(uptr node) {
    uptr idx = nodeToIndex(node);
    CHECK(!available_nodes_.getBit(idx));
    CHECK(recycled_nodes_.setBit(idx));
    g_.removeEdgesFrom(idx);
  }

  void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
    dtls->ensureCurrentEpoch(current_epoch_);
  }

  // Returns true if there is a cycle in the graph after this lock event.
  // Ideally should be called before the lock is acquired so that we can
  // report a deadlock before a real deadlock happens.
  bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
  }

  u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    return dtls->findLockContext(nodeToIndex(node));
  }

  // Add cur_node to the set of locks currently held by dtls.
  void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
  }

  // Experimental *racy* fast path function.
  // Returns true if all edges from the currently held locks to cur_node exist.
  bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    uptr local_epoch = dtls->getEpoch();
    // The read of current_epoch_ is racy.
    if (cur_node && local_epoch == current_epoch_ &&
        local_epoch == nodeToEpoch(cur_node)) {
      uptr cur_idx = nodeToIndexUnchecked(cur_node);
      for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
        if (!g_.hasEdge(dtls->getLock(i), cur_idx))
          return false;
      }
      return true;
    }
    return false;
  }

  // Adds edges from the currently held locks to cur_node,
  // returns the number of added edges, and puts the sources of the added
  // edges into added_edges[].
  // Should be called before onLockAfter.
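  // The contexts of the added edges (the lock-acquisition stacks and the
  // acquiring thread's unique_tid) are recorded in edges_ so that they can
  // later be retrieved with findEdge().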
  uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
                int unique_tid) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    uptr added_edges[40];
    uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
                                     added_edges, ARRAY_SIZE(added_edges));
    for (uptr i = 0; i < n_added_edges; i++) {
      if (n_edges_ < ARRAY_SIZE(edges_)) {
        Edge e = {(u16)added_edges[i], (u16)cur_idx,
                  dtls->findLockContext(added_edges[i]), stk,
                  unique_tid};
        edges_[n_edges_++] = e;
      }
      // Printf("Edge%zd: %u %zd=>%zd in T%d\n",
      //        n_edges_, stk, added_edges[i], cur_idx, unique_tid);
    }
    return n_added_edges;
  }

  bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
                int *unique_tid) {
    uptr from_idx = nodeToIndex(from_node);
    uptr to_idx = nodeToIndex(to_node);
    for (uptr i = 0; i < n_edges_; i++) {
      if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
        *stk_from = edges_[i].stk_from;
        *stk_to = edges_[i].stk_to;
        *unique_tid = edges_[i].unique_tid;
        return true;
      }
    }
    return false;
  }

  // Test-only function. Handles the before/after lock events,
  // returns true if there is a cycle.
  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
    addEdges(dtls, cur_node, stk, 0);
    onLockAfter(dtls, cur_node, stk);
    return is_reachable;
  }

  // Handles the try_lock event, returns false.
  // When a try_lock event happens (i.e. a try_lock call succeeds) we need
  // to add this lock to the currently held locks, but we should not try to
  // change the lock graph or to detect a cycle. We may want to investigate
  // whether a more aggressive strategy is possible for try_lock.
  bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
    return false;
  }

  // Returns true iff dtls is empty (no locks are currently held) and we can
  // add the node to the currently held locks w/o changing the global state.
  // This operation is thread-safe as it only touches the dtls.
  bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (!dtls->empty()) return false;
    if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  // Finds a path between the lock 'cur_node' (currently not held in dtls)
  // and some currently held lock, returns the length of the path
  // or 0 on failure.
  uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
                      uptr path_size) {
    tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
    uptr idx = nodeToIndex(cur_node);
    CHECK(!tmp_bv_.getBit(idx));
    uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
    for (uptr i = 0; i < res; i++)
      path[i] = indexToNode(path[i]);
    if (res)
      CHECK_EQ(path[0], cur_node);
    return res;
  }

  // Handle the unlock event.
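  // A node from an older epoch is ignored: the per-thread lock set only
  // tracks locks from the current epoch.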
  // This operation is thread-safe as it only touches the dtls.
  void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    if (dtls->getEpoch() == nodeToEpoch(node))
      dtls->removeLock(nodeToIndexUnchecked(node));
  }

  // Tries to handle the lock event w/o writing to global state.
  // Returns true on success.
  // This operation is thread-safe as it only touches the dtls
  // (modulo the racy nature of hasAllEdges).
  bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (hasAllEdges(dtls, node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
    return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
  }

  uptr testOnlyGetEpoch() const { return current_epoch_; }
  bool testOnlyHasEdge(uptr l1, uptr l2) {
    return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
  }
  // idx1 and idx2 are raw indices to g_, not lock IDs.
  bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
    return g_.hasEdge(idx1, idx2);
  }

  void Print() {
    for (uptr from = 0; from < size(); from++)
      for (uptr to = 0; to < size(); to++)
        if (g_.hasEdge(from, to))
          Printf("  %zx => %zx\n", from, to);
  }

 private:
  void check_idx(uptr idx) const { CHECK_LT(idx, size()); }

  void check_node(uptr node) const {
    CHECK_GE(node, size());
    CHECK_EQ(current_epoch_, nodeToEpoch(node));
  }

  uptr indexToNode(uptr idx) const {
    check_idx(idx);
    return idx + current_epoch_;
  }

  uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }

  uptr nodeToIndex(uptr node) const {
    check_node(node);
    return nodeToIndexUnchecked(node);
  }

  uptr nodeToEpoch(uptr node) const { return node / size() * size(); }

  uptr getAvailableNode(uptr data) {
    uptr idx = available_nodes_.getAndClearFirstOne();
    data_[idx] = data;
    return indexToNode(idx);
  }

  struct Edge {
    u16 from;
    u16 to;
    u32 stk_from;
    u32 stk_to;
    int unique_tid;
  };

  uptr current_epoch_;
  BV available_nodes_;
  BV recycled_nodes_;
  BV tmp_bv_;
  BVGraph<BV> g_;
  uptr data_[BV::kSize];
  Edge edges_[BV::kSize * 32];
  uptr n_edges_;
};

}  // namespace __sanitizer

#endif  // SANITIZER_DEADLOCK_DETECTOR_H
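
//===----------------------------------------------------------------------===//
// A minimal usage sketch of the call order documented above
// (onLockBefore/addEdges before a lock is acquired, onLockAfter after it is
// acquired, onUnlock on release). The bit-vector type TwoLevelBitVector<>, the
// names g_detector/dtls/ReportDeadlock, and the external guard lock are
// illustrative assumptions of this sketch, not part of this interface.
//
//   typedef TwoLevelBitVector<> DDBV;                    // assumed BV type
//   static DeadlockDetector<DDBV> g_detector;            // one global object
//   static THREADLOCAL DeadlockDetectorTLS<DDBV> dtls;   // one per thread
//
//   // Calls that touch the global graph must be guarded by an external lock.
//   // On mutex creation:    uptr node = g_detector.newNode((uptr)&mtx);
//   // Before acquisition:   if (g_detector.onLockBefore(&dtls, node))
//   //                         ReportDeadlock();  // hypothetical reporter
//   //                       g_detector.addEdges(&dtls, node, stk, tid);
//   // After acquisition:    g_detector.onLockAfter(&dtls, node, stk);
//   // On unlock:            g_detector.onUnlock(&dtls, node);
//   // On mutex destruction: g_detector.removeNode(node);
//===----------------------------------------------------------------------===//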