// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// GraphCycles provides incremental cycle detection on a dynamic
// graph using the following algorithm:
//
// A dynamic topological sort algorithm for directed acyclic graphs
// David J. Pearce, Paul H. J. Kelly
// Journal of Experimental Algorithmics (JEA)
// Volume 11, 2006, Article No. 1.7
//
// Brief summary of the algorithm:
//
// (1) Maintain a rank for each node that is consistent
//     with the topological sort of the graph. I.e., path from x to y
//     implies rank[x] < rank[y].
// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
// (3) Otherwise: adjust ranks in the neighborhood of x and y.
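//
// For example, with a single edge a->b and ranks {a:0, b:1, c:2},
// inserting c->a violates (2) because rank[c] > rank[a].  Step (3)
// then finds the affected region (a and b via a forward search from a,
// c via a backward search from c) and permutes the existing rank
// values to {c:0, a:1, b:2}, restoring invariant (1).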

#include "absl/base/attributes.h"
// This file is a no-op if the required LowLevelAlloc support is missing.
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#include "absl/synchronization/internal/graphcycles.h"

#include <algorithm>
#include <array>
#include <limits>
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

// Do not use STL.  This module does not use standard memory allocation.

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {

namespace {

// Avoid LowLevelAlloc's default arena since it calls malloc hooks in
// which people are doing things like acquiring Mutexes.
ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;

static void InitArenaIfNecessary() {
  arena_mu.Lock();
  if (arena == nullptr) {
    arena = base_internal::LowLevelAlloc::NewArena(0);
  }
  arena_mu.Unlock();
}

// Number of inlined elements in Vec.  Hash table implementation
// relies on this being a power of two.
static const uint32_t kInline = 8;

// A simple LowLevelAlloc based resizable vector with inlined storage
// for a few elements.  T must be a plain type since constructor
// and destructor are not run on elements of type T managed by Vec.
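// Capacity grows by doubling (see Grow below), so push_back runs in
// amortized constant time.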
template <typename T>
class Vec {
 public:
  Vec() { Init(); }
  ~Vec() { Discard(); }

  void clear() {
    Discard();
    Init();
  }

  bool empty() const { return size_ == 0; }
  uint32_t size() const { return size_; }
  T* begin() { return ptr_; }
  T* end() { return ptr_ + size_; }
  const T& operator[](uint32_t i) const { return ptr_[i]; }
  T& operator[](uint32_t i) { return ptr_[i]; }
  const T& back() const { return ptr_[size_-1]; }
  void pop_back() { size_--; }

  void push_back(const T& v) {
    if (size_ == capacity_) Grow(size_ + 1);
    ptr_[size_] = v;
    size_++;
  }

  void resize(uint32_t n) {
    if (n > capacity_) Grow(n);
    size_ = n;
  }

  void fill(const T& val) {
    for (uint32_t i = 0; i < size(); i++) {
      ptr_[i] = val;
    }
  }

  // Guarantees src is empty at end.
  // Provided for the hash table resizing code below.
  void MoveFrom(Vec<T>* src) {
    if (src->ptr_ == src->space_) {
      // Need to actually copy
      resize(src->size_);
      std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
      src->size_ = 0;
    } else {
      Discard();
      ptr_ = src->ptr_;
      size_ = src->size_;
      capacity_ = src->capacity_;
      src->Init();
    }
  }

 private:
  T* ptr_;
  T space_[kInline];
  uint32_t size_;
  uint32_t capacity_;

  void Init() {
    ptr_ = space_;
    size_ = 0;
    capacity_ = kInline;
  }

  void Discard() {
    if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
  }

  void Grow(uint32_t n) {
    while (capacity_ < n) {
      capacity_ *= 2;
    }
    size_t request = static_cast<size_t>(capacity_) * sizeof(T);
    T* copy = static_cast<T*>(
        base_internal::LowLevelAlloc::AllocWithArena(request, arena));
    std::copy(ptr_, ptr_ + size_, copy);
    Discard();
    ptr_ = copy;
  }

  Vec(const Vec&) = delete;
  Vec& operator=(const Vec&) = delete;
};

// A hash set of non-negative int32_t that uses Vec for its underlying storage.
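// Open addressing with linear probing is used; erased slots are marked
// with a tombstone (kDel) so that probe chains remain intact.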
class NodeSet {
 public:
  NodeSet() { Init(); }

  void clear() { Init(); }
  bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }

  bool insert(int32_t v) {
    uint32_t i = FindIndex(v);
    if (table_[i] == v) {
      return false;
    }
    if (table_[i] == kEmpty) {
      // Only inserting over an empty cell increases the number of occupied
      // slots.
      occupied_++;
    }
    table_[i] = v;
    // Double when 75% full.
    if (occupied_ >= table_.size() - table_.size()/4) Grow();
    return true;
  }

  void erase(uint32_t v) {
    uint32_t i = FindIndex(v);
    if (static_cast<uint32_t>(table_[i]) == v) {
      table_[i] = kDel;
    }
  }

  // Iteration: is done via HASH_FOR_EACH
  // Example:
  //    HASH_FOR_EACH(elem, node->out) { ... }
#define HASH_FOR_EACH(elem, eset) \
  for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
  bool Next(int32_t* cursor, int32_t* elem) {
    while (static_cast<uint32_t>(*cursor) < table_.size()) {
      int32_t v = table_[*cursor];
      (*cursor)++;
      if (v >= 0) {
        *elem = v;
        return true;
      }
    }
    return false;
  }

 private:
  enum : int32_t { kEmpty = -1, kDel = -2 };
  Vec<int32_t> table_;
  uint32_t occupied_;     // Count of non-empty slots (includes deleted slots)

  static uint32_t Hash(uint32_t a) { return a * 41; }

  // Return index for storing v.  May return an empty index or deleted index
  int FindIndex(int32_t v) const {
    // Search starting at hash index.
    const uint32_t mask = table_.size() - 1;
    uint32_t i = Hash(v) & mask;
    int deleted_index = -1;  // If >= 0, index of first deleted element we see
    while (true) {
      int32_t e = table_[i];
      if (v == e) {
        return i;
      } else if (e == kEmpty) {
        // Return any previously encountered deleted slot.
        return (deleted_index >= 0) ? deleted_index : i;
      } else if (e == kDel && deleted_index < 0) {
        // Keep searching since v might be present later.
        deleted_index = i;
      }
      i = (i + 1) & mask;  // Linear probing; quadratic is slightly slower.
    }
  }

  void Init() {
    table_.clear();
    table_.resize(kInline);
    table_.fill(kEmpty);
    occupied_ = 0;
  }

  void Grow() {
    Vec<int32_t> copy;
    copy.MoveFrom(&table_);
    occupied_ = 0;
    table_.resize(copy.size() * 2);
    table_.fill(kEmpty);

    for (const auto& e : copy) {
      if (e >= 0) insert(e);
    }
  }

  NodeSet(const NodeSet&) = delete;
  NodeSet& operator=(const NodeSet&) = delete;
};

// We encode a node index and a node version in GraphId.  The version
// number is incremented when the GraphId is freed which automatically
// invalidates all copies of the GraphId.
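//
// For example, MakeId(5, 3) produces handle 0x0000000300000005: the low
// 32 bits hold the node index and the high 32 bits hold the version.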

inline GraphId MakeId(int32_t index, uint32_t version) {
  GraphId g;
  g.handle =
      (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
  return g;
}

inline int32_t NodeIndex(GraphId id) {
  return static_cast<uint32_t>(id.handle & 0xfffffffful);
}

inline uint32_t NodeVersion(GraphId id) {
  return static_cast<uint32_t>(id.handle >> 32);
}

struct Node {
  int32_t rank;               // rank number assigned by Pearce-Kelly algorithm
  uint32_t version;           // Current version number
  int32_t next_hash;          // Next entry in hash table
  bool visited;               // Temporary marker used by depth-first-search
  uintptr_t masked_ptr;       // User-supplied pointer
  NodeSet in;                 // List of immediate predecessor nodes in graph
  NodeSet out;                // List of immediate successor nodes in graph
  int priority;               // Priority of recorded stack trace.
  int nstack;                 // Depth of recorded stack trace.
  void* stack[40];            // stack[0,nstack-1] holds stack trace for node.
};

// Hash table for pointer to node index lookups.
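// Collisions are resolved by chaining through Node::next_hash, so the
// map needs no storage beyond its fixed array of bucket heads.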
class PointerMap {
 public:
  explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
    table_.fill(-1);
  }

  int32_t Find(void* ptr) {
    auto masked = base_internal::HidePtr(ptr);
    for (int32_t i = table_[Hash(ptr)]; i != -1;) {
      Node* n = (*nodes_)[i];
      if (n->masked_ptr == masked) return i;
      i = n->next_hash;
    }
    return -1;
  }

  void Add(void* ptr, int32_t i) {
    int32_t* head = &table_[Hash(ptr)];
    (*nodes_)[i]->next_hash = *head;
    *head = i;
  }

  int32_t Remove(void* ptr) {
    // Advance through linked list while keeping track of the
    // predecessor slot that points to the current entry.
    auto masked = base_internal::HidePtr(ptr);
    for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
      int32_t index = *slot;
      Node* n = (*nodes_)[index];
      if (n->masked_ptr == masked) {
        *slot = n->next_hash;  // Remove n from linked list
        n->next_hash = -1;
        return index;
      }
      slot = &n->next_hash;
    }
    return -1;
  }

 private:
  // Number of buckets in hash table for pointer lookups.
  static constexpr uint32_t kHashTableSize = 8171;  // should be prime

  const Vec<Node*>* nodes_;
  std::array<int32_t, kHashTableSize> table_;

  static uint32_t Hash(void* ptr) {
    return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
  }
};

}  // namespace

struct GraphCycles::Rep {
  Vec<Node*> nodes_;
  Vec<int32_t> free_nodes_;  // Indices for unused entries in nodes_
  PointerMap ptrmap_;

  // Temporary state.
  Vec<int32_t> deltaf_;  // Results of forward DFS
  Vec<int32_t> deltab_;  // Results of backward DFS
  Vec<int32_t> list_;    // All nodes to reprocess
  Vec<int32_t> merged_;  // Rank values to assign to list_ entries
  Vec<int32_t> stack_;   // Emulates recursion stack for depth-first searches

  Rep() : ptrmap_(&nodes_) {}
};

static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
  Node* n = rep->nodes_[NodeIndex(id)];
  return (n->version == NodeVersion(id)) ? n : nullptr;
}

GraphCycles::GraphCycles() {
  InitArenaIfNecessary();
  rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
      Rep;
}

GraphCycles::~GraphCycles() {
  for (auto* node : rep_->nodes_) {
    node->Node::~Node();
    base_internal::LowLevelAlloc::Free(node);
  }
  rep_->Rep::~Rep();
  base_internal::LowLevelAlloc::Free(rep_);
}

bool GraphCycles::CheckInvariants() const {
  Rep* r = rep_;
  NodeSet ranks;  // Set of ranks seen so far.
  for (uint32_t x = 0; x < r->nodes_.size(); x++) {
    Node* nx = r->nodes_[x];
    void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
    if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
      ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
    }
    if (nx->visited) {
      ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
    }
    if (!ranks.insert(nx->rank)) {
      ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
    }
    HASH_FOR_EACH(y, nx->out) {
      Node* ny = r->nodes_[y];
      if (nx->rank >= ny->rank) {
        ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
                     nx->rank, ny->rank);
      }
    }
  }
  return true;
}

GraphId GraphCycles::GetId(void* ptr) {
  int32_t i = rep_->ptrmap_.Find(ptr);
  if (i != -1) {
    return MakeId(i, rep_->nodes_[i]->version);
  } else if (rep_->free_nodes_.empty()) {
    Node* n =
        new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
            Node;
    n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
    n->visited = false;
    n->rank = rep_->nodes_.size();
    n->masked_ptr = base_internal::HidePtr(ptr);
    n->nstack = 0;
    n->priority = 0;
    rep_->nodes_.push_back(n);
    rep_->ptrmap_.Add(ptr, n->rank);
    return MakeId(n->rank, n->version);
  } else {
    // Preserve preceding rank since the set of ranks in use must be
    // a permutation of [0,rep_->nodes_.size()-1].
    int32_t r = rep_->free_nodes_.back();
    rep_->free_nodes_.pop_back();
    Node* n = rep_->nodes_[r];
    n->masked_ptr = base_internal::HidePtr(ptr);
    n->nstack = 0;
    n->priority = 0;
    rep_->ptrmap_.Add(ptr, r);
    return MakeId(r, n->version);
  }
}

void GraphCycles::RemoveNode(void* ptr) {
  int32_t i = rep_->ptrmap_.Remove(ptr);
  if (i == -1) {
    return;
  }
  Node* x = rep_->nodes_[i];
  HASH_FOR_EACH(y, x->out) {
    rep_->nodes_[y]->in.erase(i);
  }
  HASH_FOR_EACH(y, x->in) {
    rep_->nodes_[y]->out.erase(i);
  }
  x->in.clear();
  x->out.clear();
  x->masked_ptr = base_internal::HidePtr<void>(nullptr);
  if (x->version == std::numeric_limits<uint32_t>::max()) {
    // Cannot use x any more
  } else {
    x->version++;  // Invalidates all copies of node.
    rep_->free_nodes_.push_back(i);
  }
}

void* GraphCycles::Ptr(GraphId id) {
  Node* n = FindNode(rep_, id);
  return n == nullptr ? nullptr
                      : base_internal::UnhidePtr<void>(n->masked_ptr);
}

bool GraphCycles::HasNode(GraphId node) {
  return FindNode(rep_, node) != nullptr;
}

bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
  Node* xn = FindNode(rep_, x);
  return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
}

void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
  Node* xn = FindNode(rep_, x);
  Node* yn = FindNode(rep_, y);
  if (xn && yn) {
    xn->out.erase(NodeIndex(y));
    yn->in.erase(NodeIndex(x));
    // No need to update the rank assignment since a previous valid
    // rank assignment remains valid after an edge deletion.
  }
}

static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
static void Reorder(GraphCycles::Rep* r);
static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void MoveToList(
    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);

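// Returns false and leaves the graph unchanged if adding idx->idy would
// create a cycle (including the self-edge case); otherwise inserts the
// edge (if not already present) and returns true.  Expired ids are
// ignored and also yield true.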
bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
  Rep* r = rep_;
  const int32_t x = NodeIndex(idx);
  const int32_t y = NodeIndex(idy);
  Node* nx = FindNode(r, idx);
  Node* ny = FindNode(r, idy);
  if (nx == nullptr || ny == nullptr) return true;  // Expired ids

  if (nx == ny) return false;  // Self edge
  if (!nx->out.insert(y)) {
    // Edge already exists.
    return true;
  }

  ny->in.insert(x);

  if (nx->rank <= ny->rank) {
    // New edge is consistent with existing rank assignment.
    return true;
  }

  // Current rank assignments are incompatible with the new edge.  Recompute.
  // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
  if (!ForwardDFS(r, y, nx->rank)) {
    // Found a cycle.  Undo the insertion and tell caller.
    nx->out.erase(y);
    ny->in.erase(x);
    // Since we do not call Reorder() on this path, clear any visited
    // markers left by ForwardDFS.
    for (const auto& d : r->deltaf_) {
      r->nodes_[d]->visited = false;
    }
    return false;
  }
  BackwardDFS(r, x, ny->rank);
  Reorder(r);
  return true;
}

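// Collects in deltaf_ the nodes reachable from n whose rank is below
// upper_bound.  Returns false if the search reaches a node whose rank
// equals upper_bound: ranks are unique, so that node is the tail of the
// new edge and the edge would close a cycle.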
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
  // Avoid recursion since stack space might be limited.
  // We instead keep a stack of nodes to visit.
  r->deltaf_.clear();
  r->stack_.clear();
  r->stack_.push_back(n);
  while (!r->stack_.empty()) {
    n = r->stack_.back();
    r->stack_.pop_back();
    Node* nn = r->nodes_[n];
    if (nn->visited) continue;

    nn->visited = true;
    r->deltaf_.push_back(n);

    HASH_FOR_EACH(w, nn->out) {
      Node* nw = r->nodes_[w];
      if (nw->rank == upper_bound) {
        return false;  // Cycle
      }
      if (!nw->visited && nw->rank < upper_bound) {
        r->stack_.push_back(w);
      }
    }
  }
  return true;
}

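// Collects in deltab_ the nodes from which n is reachable and whose
// rank is above lower_bound.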
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
  r->deltab_.clear();
  r->stack_.clear();
  r->stack_.push_back(n);
  while (!r->stack_.empty()) {
    n = r->stack_.back();
    r->stack_.pop_back();
    Node* nn = r->nodes_[n];
    if (nn->visited) continue;

    nn->visited = true;
    r->deltab_.push_back(n);

    HASH_FOR_EACH(w, nn->in) {
      Node* nw = r->nodes_[w];
      if (!nw->visited && lower_bound < nw->rank) {
        r->stack_.push_back(w);
      }
    }
  }
}

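// Redistributes the ranks already held by the affected nodes: every
// node found by BackwardDFS ends up ranked before every node found by
// ForwardDFS, which restores the topological-order invariant.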
static void Reorder(GraphCycles::Rep* r) {
  Sort(r->nodes_, &r->deltab_);
  Sort(r->nodes_, &r->deltaf_);

  // Adds contents of delta lists to list_ (backwards deltas first).
  r->list_.clear();
  MoveToList(r, &r->deltab_, &r->list_);
  MoveToList(r, &r->deltaf_, &r->list_);

  // Produce sorted list of all ranks that will be reassigned.
  r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
  std::merge(r->deltab_.begin(), r->deltab_.end(),
             r->deltaf_.begin(), r->deltaf_.end(),
             r->merged_.begin());

  // Assign the ranks in order to the collected list.
  for (uint32_t i = 0; i < r->list_.size(); i++) {
    r->nodes_[r->list_[i]]->rank = r->merged_[i];
  }
}

static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
  struct ByRank {
    const Vec<Node*>* nodes;
    bool operator()(int32_t a, int32_t b) const {
      return (*nodes)[a]->rank < (*nodes)[b]->rank;
    }
  };
  ByRank cmp;
  cmp.nodes = &nodes;
  std::sort(delta->begin(), delta->end(), cmp);
}

static void MoveToList(
    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
  for (auto& v : *src) {
    int32_t w = v;
    v = r->nodes_[w]->rank;         // Replace v entry with its rank
    r->nodes_[w]->visited = false;  // Prepare for future DFS calls
    dst->push_back(w);
  }
}

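// Returns the length of some path from idx to idy, or 0 if none exists.
// At most max_path_len elements of path[] are written, but the returned
// length may exceed max_path_len when the discovered path is longer.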
int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
                          GraphId path[]) const {
  Rep* r = rep_;
  if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
  const int32_t x = NodeIndex(idx);
  const int32_t y = NodeIndex(idy);

  // Forward depth first search starting at x until we hit y.
  // As we descend into a node, we push it onto the path.
  // As we leave a node, we remove it from the path.
  int path_len = 0;

  NodeSet seen;
  r->stack_.clear();
  r->stack_.push_back(x);
  while (!r->stack_.empty()) {
    int32_t n = r->stack_.back();
    r->stack_.pop_back();
    if (n < 0) {
      // Marker to indicate that we are leaving a node
      path_len--;
      continue;
    }

    if (path_len < max_path_len) {
      path[path_len] = MakeId(n, rep_->nodes_[n]->version);
    }
    path_len++;
    r->stack_.push_back(-1);  // Will remove tentative path entry

    if (n == y) {
      return path_len;
    }

    HASH_FOR_EACH(w, r->nodes_[n]->out) {
      if (seen.insert(w)) {
        r->stack_.push_back(w);
      }
    }
  }

  return 0;
}

bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
  return FindPath(x, y, 0, nullptr) > 0;
}

void GraphCycles::UpdateStackTrace(GraphId id, int priority,
                                   int (*get_stack_trace)(void** stack, int)) {
  Node* n = FindNode(rep_, id);
  if (n == nullptr || n->priority >= priority) {
    return;
  }
  n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
  n->priority = priority;
}

int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
  Node* n = FindNode(rep_, id);
  if (n == nullptr) {
    *ptr = nullptr;
    return 0;
  } else {
    *ptr = n->stack;
    return n->nstack;
  }
}

}  // namespace synchronization_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING