1 //===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 ///
11 /// Implements a lazy call graph analysis and related passes for the new pass
12 /// manager.
13 ///
14 /// NB: This is *not* a traditional call graph! It is a graph which models both
15 /// the current calls and potential calls. As a consequence there are many
16 /// edges in this call graph that do not correspond to a 'call' or 'invoke'
17 /// instruction.
18 ///
19 /// The primary use case of this graph analysis is to facilitate iterating
20 /// across the functions of a module in ways that ensure all callees are
21 /// visited prior to a caller (given any SCC constraints), or vice versa. As
22 /// such it is particularly well suited to organizing CGSCC optimizations such
23 /// as inlining, outlining, argument promotion, etc. That is its primary use
24 /// case and motivates the design. It may not be appropriate for other
25 /// purposes. The use graph of functions or some other conservative analysis of
26 /// call instructions may be interesting for optimizations and subsequent
27 /// analyses which don't work in the context of an overly specified
28 /// potential-call-edge graph.
29 ///
30 /// To understand the specific rules and nature of this call graph analysis,
31 /// see the documentation of the \c LazyCallGraph below.
32 ///
33 //===----------------------------------------------------------------------===//
34
35 #ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
36 #define LLVM_ANALYSIS_LAZYCALLGRAPH_H
37
38 #include "llvm/ADT/ArrayRef.h"
39 #include "llvm/ADT/DenseMap.h"
40 #include "llvm/ADT/Optional.h"
41 #include "llvm/ADT/PointerIntPair.h"
42 #include "llvm/ADT/SetVector.h"
43 #include "llvm/ADT/SmallPtrSet.h"
44 #include "llvm/ADT/SmallVector.h"
45 #include "llvm/ADT/StringRef.h"
46 #include "llvm/ADT/iterator.h"
47 #include "llvm/ADT/iterator_range.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/IR/Constant.h"
50 #include "llvm/IR/Constants.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/PassManager.h"
53 #include "llvm/Support/Allocator.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <cassert>
57 #include <iterator>
58 #include <string>
59 #include <utility>
60
61 namespace llvm {
62
63 class Module;
64 class Value;
65
66 /// A lazily constructed view of the call graph of a module.
67 ///
68 /// With the edges of this graph, the motivating constraint that we are
69 /// attempting to maintain is that function-local optimization, CGSCC-local
70 /// optimizations, and optimizations transforming a pair of functions connected
71 /// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
72 /// DAG. That is, no optimizations will delete, remove, or add an edge such
73 /// that functions already visited in a bottom-up order of the SCC DAG are no
74 /// longer valid to have visited, or such that functions not yet visited in
75 /// a bottom-up order of the SCC DAG are not required to have already been
76 /// visited.
77 ///
78 /// Within this constraint, the desire is to minimize the merge points of the
79 /// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
80 /// in the SCC DAG, the more independence there is in optimizing within it.
81 /// There is a strong desire to enable parallelization of optimizations over
82 /// the call graph, and both limited fanout and merge points will (artificially
83 /// in some cases) limit the scaling of such an effort.
84 ///
85 /// To this end, the graph represents both direct calls and any potential resolution to
86 /// an indirect call edge. Another way to think about it is that it represents
87 /// both the direct call edges and any direct call edges that might be formed
88 /// through static optimizations. Specifically, it considers taking the address
89 /// of a function to be an edge in the call graph because this might be
90 /// forwarded to become a direct call by some subsequent function-local
91 /// optimization. The result is that the graph closely follows the use-def
92 /// edges for functions. Walking "up" the graph can be done by looking at all
93 /// of the uses of a function.
94 ///
95 /// The roots of the call graph are the external functions and functions
96 /// escaped into global variables. Those functions can be called from outside
97 /// of the module or via unknowable means in the IR -- we may not be able to
98 /// form even a potential call edge from a function body which may dynamically
99 /// load the function and call it.
100 ///
101 /// This analysis still requires updates to remain valid after optimizations
102 /// which could potentially change the set of potential callees. The
103 /// constraints it operates under only make the traversal order remain valid.
104 ///
105 /// The entire analysis must be re-computed if full interprocedural
106 /// optimizations run at any point. For example, globalopt completely
107 /// invalidates the information in this analysis.
108 ///
109 /// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
110 /// it from the existing CallGraph. At some point, it is expected that this
111 /// will be the only call graph and it will be renamed accordingly.
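///
/// A minimal usage sketch (illustrative only; `M`, `TLI`, and `visit` are
/// assumed to come from the surrounding code):
///
/// \code
///   LazyCallGraph CG(M, TLI);
///   CG.buildRefSCCs();
///   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
///     for (LazyCallGraph::SCC &C : RC)
///       for (LazyCallGraph::Node &N : C)
///         visit(N.getFunction()); // `visit` is a hypothetical callback.
/// \endcode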
112 class LazyCallGraph {
113 public:
114 class Node;
115 class EdgeSequence;
116 class SCC;
117 class RefSCC;
118 class edge_iterator;
119 class call_edge_iterator;
120
121 /// A class used to represent edges in the call graph.
122 ///
123 /// The lazy call graph models both *call* edges and *reference* edges. Call
124 /// edges are much what you would expect, and exist when there is a 'call' or
125 /// 'invoke' instruction targeting some function. Reference edges are also tracked
126 /// alongside these, and exist whenever any instruction (transitively
127 /// through its operands) references a function. All call edges are
128 /// inherently reference edges, and so the reference graph forms a superset
129 /// of the formal call graph.
130 ///
131 /// All of these forms of edges are fundamentally represented as outgoing
132 /// edges. The edges are stored in the source node and point at the target
133 /// node. This allows the edge structure itself to be a very compact data
134 /// structure: essentially a tagged pointer.
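///
/// For example, a populated node's edges can be classified like so (a sketch;
/// `N` is assumed to be an already-populated \c Node, and `handleCall` and
/// `handleRef` are hypothetical callbacks):
///
/// \code
///   for (LazyCallGraph::Edge &E : *N)
///     if (E.isCall())
///       handleCall(E.getFunction()); // 'call'/'invoke' of a known callee.
///     else
///       handleRef(E.getFunction());  // Address taken or otherwise referenced.
/// \endcode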
135 class Edge {
136 public:
137 /// The kind of edge in the graph.
138 enum Kind : bool { Ref = false, Call = true };
139
140 Edge();
141 explicit Edge(Node &N, Kind K);
142
143 /// Test whether the edge is null.
144 ///
145 /// This happens when an edge has been deleted. We leave the edge objects
146 /// around but clear them.
147 explicit operator bool() const;
148
149 /// Returns the \c Kind of the edge.
150 Kind getKind() const;
151
152 /// Test whether the edge represents a direct call to a function.
153 ///
154 /// This requires that the edge is not null.
155 bool isCall() const;
156
157 /// Get the call graph node referenced by this edge.
158 ///
159 /// This requires that the edge is not null.
160 Node &getNode() const;
161
162 /// Get the function referenced by this edge.
163 ///
164 /// This requires that the edge is not null.
165 Function &getFunction() const;
166
167 private:
168 friend class LazyCallGraph::EdgeSequence;
169 friend class LazyCallGraph::RefSCC;
170
171 PointerIntPair<Node *, 1, Kind> Value;
172
173 void setKind(Kind K) { Value.setInt(K); }
174 };
175
176 /// The edge sequence object.
177 ///
178 /// This typically exists entirely within the node but is exposed as
179 /// a separate type because a node doesn't initially have edges. An explicit
180 /// population step is required to produce this sequence at first and it is
181 /// then cached in the node. It is also used to represent edges entering the
182 /// graph from outside the module to model the graph's roots.
183 ///
184 /// The sequence itself is both iterable and indexable. The indexes remain
185 /// stable even as the sequence mutates (including removal).
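///
/// A brief sketch of how the sequence is typically consumed (assuming `N` is
/// a \c Node that has already been populated and `processCallee` is a
/// hypothetical callback):
///
/// \code
///   LazyCallGraph::EdgeSequence &Edges = *N;
///   for (LazyCallGraph::Edge &E : Edges.calls())
///     processCallee(E.getNode());
///   if (LazyCallGraph::Edge *E = Edges.lookup(TargetN))
///     assert(E->getNode() == TargetN); // Indexed lookup by target node.
/// \endcode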
186 class EdgeSequence {
187 friend class LazyCallGraph;
188 friend class LazyCallGraph::Node;
189 friend class LazyCallGraph::RefSCC;
190
191 using VectorT = SmallVector<Edge, 4>;
192 using VectorImplT = SmallVectorImpl<Edge>;
193
194 public:
195 /// An iterator used for the edges to both entry nodes and child nodes.
196 class iterator
197 : public iterator_adaptor_base<iterator, VectorImplT::iterator,
198 std::forward_iterator_tag> {
199 friend class LazyCallGraph;
200 friend class LazyCallGraph::Node;
201
202 VectorImplT::iterator E;
203
204 // Build the iterator for a specific position in the edge list.
205 iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
206 : iterator_adaptor_base(BaseI), E(E) {
207 while (I != E && !*I)
208 ++I;
209 }
210
211 public:
212 iterator() = default;
213
214 using iterator_adaptor_base::operator++;
215 iterator &operator++() {
216 do {
217 ++I;
218 } while (I != E && !*I);
219 return *this;
220 }
221 };
222
223 /// An iterator over specifically call edges.
224 ///
225 /// This has the same iteration properties as the \c iterator, but
226 /// restricts itself to edges which represent actual calls.
227 class call_iterator
228 : public iterator_adaptor_base<call_iterator, VectorImplT::iterator,
229 std::forward_iterator_tag> {
230 friend class LazyCallGraph;
231 friend class LazyCallGraph::Node;
232
233 VectorImplT::iterator E;
234
235 /// Advance the iterator to the next valid, call edge.
236 void advanceToNextEdge() {
237 while (I != E && (!*I || !I->isCall()))
238 ++I;
239 }
240
241 // Build the iterator for a specific position in the edge list.
242 call_iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
243 : iterator_adaptor_base(BaseI), E(E) {
244 advanceToNextEdge();
245 }
246
247 public:
248 call_iterator() = default;
249
250 using iterator_adaptor_base::operator++;
251 call_iterator &operator++() {
252 ++I;
253 advanceToNextEdge();
254 return *this;
255 }
256 };
257
258 iterator begin() { return iterator(Edges.begin(), Edges.end()); }
259 iterator end() { return iterator(Edges.end(), Edges.end()); }
260
261 Edge &operator[](int i) { return Edges[i]; }
262 Edge &operator[](Node &N) {
263 assert(EdgeIndexMap.find(&N) != EdgeIndexMap.end() && "No such edge!");
264 auto &E = Edges[EdgeIndexMap.find(&N)->second];
265 assert(E && "Dead or null edge!");
266 return E;
267 }
268
269 Edge *lookup(Node &N) {
270 auto EI = EdgeIndexMap.find(&N);
271 if (EI == EdgeIndexMap.end())
272 return nullptr;
273 auto &E = Edges[EI->second];
274 return E ? &E : nullptr;
275 }
276
277 call_iterator call_begin() {
278 return call_iterator(Edges.begin(), Edges.end());
279 }
280 call_iterator call_end() { return call_iterator(Edges.end(), Edges.end()); }
281
282 iterator_range<call_iterator> calls() {
283 return make_range(call_begin(), call_end());
284 }
285
286 bool empty() {
287 for (auto &E : Edges)
288 if (E)
289 return false;
290
291 return true;
292 }
293
294 private:
295 VectorT Edges;
296 DenseMap<Node *, int> EdgeIndexMap;
297
298 EdgeSequence() = default;
299
300 /// Internal helper to insert an edge to a node.
301 void insertEdgeInternal(Node &ChildN, Edge::Kind EK);
302
303 /// Internal helper to change an edge kind.
304 void setEdgeKind(Node &ChildN, Edge::Kind EK);
305
306 /// Internal helper to remove the edge to the given function.
307 bool removeEdgeInternal(Node &ChildN);
308
309 /// Internal helper to replace an edge key with a new one.
310 ///
311 /// This should be used when the function for a particular node in the
312 /// graph gets replaced and we are updating all of the edges to that node
313 /// to use the new function as the key.
314 void replaceEdgeKey(Function &OldTarget, Function &NewTarget);
315 };
316
317 /// A node in the call graph.
318 ///
319 /// This represents a single node. Its primary roles are to cache the list of
320 /// callees, de-duplicate and provide fast testing of whether a function is
321 /// a callee, and facilitate iteration of child nodes in the graph.
322 ///
323 /// The node works much like an optional in order to lazily populate the
324 /// edges of each node. Until populated, there are no edges. Once populated,
325 /// you can access the edges by dereferencing the node or using the `->`
326 /// operator as if the node was an `Optional<EdgeSequence>`.
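///
/// A minimal sketch of the lazy-population pattern (illustrative; `CG`, `F`,
/// and the `use` callback are assumptions):
///
/// \code
///   LazyCallGraph::Node &N = CG.get(F);
///   N.populate();             // Scans F's body the first time only.
///   for (LazyCallGraph::Edge &E : *N)
///     use(E.getFunction());
/// \endcode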
327 class Node {
328 friend class LazyCallGraph;
329 friend class LazyCallGraph::RefSCC;
330
331 public:
332 LazyCallGraph &getGraph() const { return *G; }
333
334 Function &getFunction() const { return *F; }
335
336 StringRef getName() const { return F->getName(); }
337
338 /// Equality is defined as address equality.
339 bool operator==(const Node &N) const { return this == &N; }
340 bool operator!=(const Node &N) const { return !operator==(N); }
341
342 /// Tests whether the node has been populated with edges.
343 bool isPopulated() const { return Edges.hasValue(); }
344
345 /// Tests whether this is actually a dead node and no longer valid.
346 ///
347 /// Users rarely interact with nodes in this state and other methods are
348 /// invalid. This is used to model a node in an edge list where the
349 /// function has been completely removed.
350 bool isDead() const {
351 assert(!G == !F &&
352 "Both graph and function pointers should be null or non-null.");
353 return !G;
354 }
355
356 // We allow accessing the edges by dereferencing or using the arrow
357 // operator, essentially wrapping the internal optional.
358 EdgeSequence &operator*() const {
359 // Rip const off because the node itself isn't changing here.
360 return const_cast<EdgeSequence &>(*Edges);
361 }
362 EdgeSequence *operator->() const { return &**this; }
363
364 /// Populate the edges of this node if necessary.
365 ///
366 /// The first time this is called it will populate the edges for this node
367 /// in the graph. It does this by scanning the underlying function, so once
368 /// this is done, any changes to that function must be explicitly reflected
369 /// in updates to the graph.
370 ///
371 /// \returns the populated \c EdgeSequence to simplify walking it.
372 ///
373 /// This will not update or re-scan anything if called repeatedly. Instead,
374 /// the edge sequence is cached and returned immediately on subsequent
375 /// calls.
376 EdgeSequence &populate() {
377 if (Edges)
378 return *Edges;
379
380 return populateSlow();
381 }
382
383 private:
384 LazyCallGraph *G;
385 Function *F;
386
387 // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
388 // stored directly within the node. These are both '-1' when nodes are part
389 // of an SCC (or RefSCC), or '0' when not yet reached in a DFS walk.
390 int DFSNumber = 0;
391 int LowLink = 0;
392
393 Optional<EdgeSequence> Edges;
394
395 /// Basic constructor implements the scanning of F into Edges and
396 /// EdgeIndexMap.
397 Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}
398
399 /// Implementation of the scan when populating.
400 EdgeSequence &populateSlow();
401
402 /// Internal helper to directly replace the function with a new one.
403 ///
404 /// This is used to facilitate transformations which need to replace the
405 /// formal Function object but directly move the body and users from one to
406 /// the other.
407 void replaceFunction(Function &NewF);
408
409 void clear() { Edges.reset(); }
410
411 /// Print the name of this node's function.
412 friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
413 return OS << N.F->getName();
414 }
415
416 /// Dump the name of this node's function to stderr.
417 void dump() const;
418 };
419
420 /// An SCC of the call graph.
421 ///
422 /// This represents a Strongly Connected Component of the direct call graph
423 /// -- ignoring indirect calls and function references. It stores this as
424 /// a collection of call graph nodes. While the order of nodes in the SCC is
425 /// stable, it is not any particular order.
426 ///
427 /// The SCCs are nested within a \c RefSCC, see below for details about that
428 /// outer structure. SCCs do not support mutation of the call graph, that
429 /// must be done through the containing \c RefSCC in order to fully reason
430 /// about the ordering and connections of the graph.
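///
/// An SCC can be recovered from any of its nodes and related back to its
/// containing \c RefSCC (a sketch; `CG` and `N` are assumptions):
///
/// \code
///   if (LazyCallGraph::SCC *C = CG.lookupSCC(N)) {
///     LazyCallGraph::RefSCC &RC = C->getOuterRefSCC();
///     assert(RC.find(*C) != RC.end() && "SCC must be within its RefSCC");
///   }
/// \endcode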
431 class SCC {
432 friend class LazyCallGraph;
433 friend class LazyCallGraph::Node;
434
435 RefSCC *OuterRefSCC;
436 SmallVector<Node *, 1> Nodes;
437
438 template <typename NodeRangeT>
439 SCC(RefSCC &OuterRefSCC, NodeRangeT &&Nodes)
440 : OuterRefSCC(&OuterRefSCC), Nodes(std::forward<NodeRangeT>(Nodes)) {}
441
442 void clear() {
443 OuterRefSCC = nullptr;
444 Nodes.clear();
445 }
446
447 /// Print a short description useful for debugging or logging.
448 ///
449 /// We print the function names in the SCC wrapped in '()'s and skipping
450 /// the middle functions if there are a large number.
451 //
452 // Note: this is defined inline to dodge issues with GCC's interpretation
453 // of enclosing namespaces for friend function declarations.
454 friend raw_ostream &operator<<(raw_ostream &OS, const SCC &C) {
455 OS << '(';
456 int i = 0;
457 for (LazyCallGraph::Node &N : C) {
458 if (i > 0)
459 OS << ", ";
460 // Elide the inner elements if there are too many.
461 if (i > 8) {
462 OS << "..., " << *C.Nodes.back();
463 break;
464 }
465 OS << N;
466 ++i;
467 }
468 OS << ')';
469 return OS;
470 }
471
472 /// Dump a short description of this SCC to stderr.
473 void dump() const;
474
475 #ifndef NDEBUG
476 /// Verify invariants about the SCC.
477 ///
478 /// This will attempt to validate all of the basic invariants within an
479 /// SCC, but not that it is a strongly connected component per-se. Primarily
480 /// useful while building and updating the graph to check that basic
481 /// properties are in place rather than having inexplicable crashes later.
482 void verify();
483 #endif
484
485 public:
486 using iterator = pointee_iterator<SmallVectorImpl<Node *>::const_iterator>;
487
488 iterator begin() const { return Nodes.begin(); }
489 iterator end() const { return Nodes.end(); }
490
491 int size() const { return Nodes.size(); }
492
493 RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }
494
495 /// Test if this SCC is a parent of \a C.
496 ///
497 /// Note that this is linear in the number of edges departing the current
498 /// SCC.
499 bool isParentOf(const SCC &C) const;
500
501 /// Test if this SCC is an ancestor of \a C.
502 ///
503 /// Note that in the worst case this is linear in the number of edges
504 /// departing the current SCC and every SCC in the entire graph reachable
505 /// from this SCC. Thus this very well may walk every edge in the entire
506 /// call graph! Do not call this in a tight loop!
507 bool isAncestorOf(const SCC &C) const;
508
509 /// Test if this SCC is a child of \a C.
510 ///
511 /// See the comments for \c isParentOf for detailed notes about the
512 /// complexity of this routine.
513 bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }
514
515 /// Test if this SCC is a descendant of \a C.
516 ///
517 /// See the comments for \c isParentOf for detailed notes about the
518 /// complexity of this routine.
519 bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }
520
521 /// Provide a short name by printing this SCC to a std::string.
522 ///
523 /// This copes with the fact that we don't have a name per-se for an SCC
524 /// while still making the use of this in debugging and logging useful.
525 std::string getName() const {
526 std::string Name;
527 raw_string_ostream OS(Name);
528 OS << *this;
529 OS.flush();
530 return Name;
531 }
532 };
533
534 /// A RefSCC of the call graph.
535 ///
536 /// This models a Strongly Connected Component of function reference edges in
537 /// the call graph. As opposed to actual SCCs, these can be used to scope
538 /// subgraphs of the module which are independent from other subgraphs of the
539 /// module because they do not reference it in any way. This is also the unit
540 /// where we do mutation of the graph in order to restrict mutations to those
541 /// which don't violate this independence.
542 ///
543 /// A RefSCC contains a DAG of actual SCCs. All the nodes within the RefSCC
544 /// are necessarily within some actual SCC that nests within it. Since
545 /// a direct call *is* a reference, there will always be at least one RefSCC
546 /// around any SCC.
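///
/// The nested SCCs are kept in a stable postorder, so a RefSCC can be
/// processed bottom-up with a simple loop (a sketch; `RC` and `runOnSCC`
/// are assumptions):
///
/// \code
///   for (LazyCallGraph::SCC &C : RC)
///     runOnSCC(C); // Callees' SCCs are visited before their callers'.
/// \endcode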
547 class RefSCC {
548 friend class LazyCallGraph;
549 friend class LazyCallGraph::Node;
550
551 LazyCallGraph *G;
552
553 /// A postorder list of the inner SCCs.
554 SmallVector<SCC *, 4> SCCs;
555
556 /// A map from SCC to index in the postorder list.
557 SmallDenseMap<SCC *, int, 4> SCCIndices;
558
559 /// Fast-path constructor. RefSCCs should instead be constructed by calling
560 /// formRefSCCFast on the graph itself.
561 RefSCC(LazyCallGraph &G);
562
563 void clear() {
564 SCCs.clear();
565 SCCIndices.clear();
566 }
567
568 /// Print a short description useful for debugging or logging.
569 ///
570 /// We print the SCCs wrapped in '[]'s and skipping the middle SCCs if
571 /// there are a large number.
572 //
573 // Note: this is defined inline to dodge issues with GCC's interpretation
574 // of enclosing namespaces for friend function declarations.
575 friend raw_ostream &operator<<(raw_ostream &OS, const RefSCC &RC) {
576 OS << '[';
577 int i = 0;
578 for (LazyCallGraph::SCC &C : RC) {
579 if (i > 0)
580 OS << ", ";
581 // Elide the inner elements if there are too many.
582 if (i > 4) {
583 OS << "..., " << *RC.SCCs.back();
584 break;
585 }
586 OS << C;
587 ++i;
588 }
589 OS << ']';
590 return OS;
591 }
592
593 /// Dump a short description of this RefSCC to stderr.
594 void dump() const;
595
596 #ifndef NDEBUG
597 /// Verify invariants about the RefSCC and all its SCCs.
598 ///
599 /// This will attempt to validate all of the invariants *within* the
600 /// RefSCC, but not that it is a strongly connected component of the larger
601 /// graph. This makes it useful even when partially through an update.
602 ///
603 /// Invariants checked:
604 /// - SCCs and their indices match.
605 /// - The SCCs list is in fact in post-order.
606 void verify();
607 #endif
608
609 /// Handle any necessary parent set updates after inserting a trivial ref
610 /// or call edge.
611 void handleTrivialEdgeInsertion(Node &SourceN, Node &TargetN);
612
613 public:
614 using iterator = pointee_iterator<SmallVectorImpl<SCC *>::const_iterator>;
615 using range = iterator_range<iterator>;
616 using parent_iterator =
617 pointee_iterator<SmallPtrSetImpl<RefSCC *>::const_iterator>;
618
619 iterator begin() const { return SCCs.begin(); }
620 iterator end() const { return SCCs.end(); }
621
622 ssize_t size() const { return SCCs.size(); }
623
624 SCC &operator[](int Idx) { return *SCCs[Idx]; }
625
626 iterator find(SCC &C) const {
627 return SCCs.begin() + SCCIndices.find(&C)->second;
628 }
629
630 /// Test if this RefSCC is a parent of \a RC.
631 ///
632 /// CAUTION: This method walks every edge in the \c RefSCC, it can be very
633 /// expensive.
634 bool isParentOf(const RefSCC &RC) const;
635
636 /// Test if this RefSCC is an ancestor of \a RC.
637 ///
638 /// CAUTION: This method walks the directed graph of edges as far as
639 /// necessary to find a possible path to the argument. In the worst case
640 /// this may walk the entire graph and can be extremely expensive.
641 bool isAncestorOf(const RefSCC &RC) const;
642
643 /// Test if this RefSCC is a child of \a RC.
644 ///
645 /// CAUTION: This method walks every edge in the argument \c RefSCC, it can
646 /// be very expensive.
647 bool isChildOf(const RefSCC &RC) const { return RC.isParentOf(*this); }
648
649 /// Test if this RefSCC is a descendant of \a RC.
650 ///
651 /// CAUTION: This method walks the directed graph of edges as far as
652 /// necessary to find a possible path from the argument. In the worst case
653 /// this may walk the entire graph and can be extremely expensive.
654 bool isDescendantOf(const RefSCC &RC) const {
655 return RC.isAncestorOf(*this);
656 }
657
658 /// Provide a short name by printing this RefSCC to a std::string.
659 ///
660 /// This copes with the fact that we don't have a name per-se for a RefSCC
661 /// while still making the use of this in debugging and logging useful.
662 std::string getName() const {
663 std::string Name;
664 raw_string_ostream OS(Name);
665 OS << *this;
666 OS.flush();
667 return Name;
668 }
669
670 ///@{
671 /// \name Mutation API
672 ///
673 /// These methods provide the core API for updating the call graph in the
674 /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
675 ///
676 /// Note that these methods sometimes have complex runtimes, so be careful
677 /// how you call them.
678
679 /// Make an existing internal ref edge into a call edge.
680 ///
681 /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
682 /// If that happens, the optional callback \p MergedCB will be invoked (if
683 /// provided) on the SCCs being merged away prior to actually performing
684 /// the merge. Note that this will never include the target SCC as that
685 /// will be the SCC functions are merged into to resolve the cycle. Once
686 /// this function returns, these merged SCCs are not in a valid state but
687 /// the pointers will remain valid until destruction of the parent graph
688 /// instance for the purpose of clearing cached information. This function
689 /// also returns 'true' if a cycle was formed and some SCCs merged away as
690 /// a convenience.
691 ///
692 /// After this operation, both SourceN's SCC and TargetN's SCC may move
693 /// position within this RefSCC's postorder list. Any SCCs merged are
694 /// merged into the TargetN's SCC in order to preserve reachability analyses
695 /// which took place on that SCC.
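///
/// A hedged sketch of typical use, clearing cached data for merged SCCs
/// (the callback body and `invalidateCachedInfoFor` are purely illustrative):
///
/// \code
///   RC.switchInternalEdgeToCall(SourceN, TargetN,
///                               [&](ArrayRef<LazyCallGraph::SCC *> Merged) {
///                                 for (LazyCallGraph::SCC *C : Merged)
///                                   invalidateCachedInfoFor(*C);
///                               });
/// \endcode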
696 bool switchInternalEdgeToCall(
697 Node &SourceN, Node &TargetN,
698 function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
699
700 /// Make an existing internal call edge between separate SCCs into a ref
701 /// edge.
702 ///
703 /// If SourceN and TargetN are in separate SCCs within this RefSCC, changing
704 /// the call edge between them to a ref edge is a trivial operation that
705 /// does not require any structural changes to the call graph.
706 void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);
707
708 /// Make an existing internal call edge within a single SCC into a ref
709 /// edge.
710 ///
711 /// Since SourceN and TargetN are part of a single SCC, this SCC may be
712 /// split up due to breaking a cycle in the call edges that formed it. If
713 /// that happens, then this routine will insert new SCCs into the postorder
714 /// list *before* the SCC of TargetN (previously the SCC of both). This
715 /// preserves postorder as the TargetN can reach all of the other nodes by
716 /// definition of previously being in a single SCC formed by the cycle from
717 /// SourceN to TargetN.
718 ///
719 /// The newly added SCCs are added *immediately* and contiguously
720 /// prior to the TargetN SCC and return the range covering the new SCCs in
721 /// the RefSCC's postorder sequence. You can directly iterate the returned
722 /// range to observe all of the new SCCs in postorder.
723 ///
724 /// Note that if SourceN and TargetN are in separate SCCs, the simpler
725 /// routine `switchTrivialInternalEdgeToRef` should be used instead.
726 iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
727 Node &TargetN);
728
729 /// Make an existing outgoing ref edge into a call edge.
730 ///
731 /// Note that this is trivial as there are no cyclic impacts and there
732 /// remains a reference edge.
733 void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);
734
735 /// Make an existing outgoing call edge into a ref edge.
736 ///
737 /// This is trivial as there are no cyclic impacts and there remains
738 /// a reference edge.
739 void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);
740
741 /// Insert a ref edge from one node in this RefSCC to another in this
742 /// RefSCC.
743 ///
744 /// This is always a trivial operation as it doesn't change any part of the
745 /// graph structure besides connecting the two nodes.
746 ///
747 /// Note that we don't support directly inserting internal *call* edges
748 /// because that could change the graph structure and requires returning
749 /// information about what became invalid. As a consequence, the pattern
750 /// should be to first insert the necessary ref edge, and then to switch it
751 /// to a call edge if needed and handle any invalidation that results. See
752 /// the \c switchInternalEdgeToCall routine for details.
753 void insertInternalRefEdge(Node &SourceN, Node &TargetN);
754
755 /// Insert an edge whose parent is in this RefSCC and child is in some
756 /// child RefSCC.
757 ///
758 /// There must be an existing path from the \p SourceN to the \p TargetN.
759 /// This operation is inexpensive and does not change the set of SCCs and
760 /// RefSCCs in the graph.
761 void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
762
763 /// Insert an edge whose source is in a descendant RefSCC and target is in
764 /// this RefSCC.
765 ///
766 /// There must be an existing path from the target to the source in this
767 /// case.
768 ///
769 /// NB! This has the potential to be a very expensive function. It
770 /// inherently forms a cycle in the prior RefSCC DAG and we have to merge
771 /// RefSCCs to resolve that cycle. But finding all of the RefSCCs which
772 /// participate in the cycle can in the worst case require traversing every
773 /// RefSCC in the graph. Every attempt is made to avoid that, but passes
774 /// must still exercise caution calling this routine repeatedly.
775 ///
776 /// Also note that this can only insert ref edges. In order to insert
777 /// a call edge, first insert a ref edge and then switch it to a call edge.
778 /// These are intentionally kept as separate interfaces because each step
779 /// of the operation invalidates a different set of data structures.
780 ///
781 /// This returns all the RefSCCs which were merged into this RefSCC
782 /// (the target's). This allows callers to invalidate any cached
783 /// information.
784 ///
785 /// FIXME: We could possibly optimize this quite a bit for cases where the
786 /// caller and callee are very nearby in the graph. See comments in the
787 /// implementation for details, but that use case might impact users.
788 SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
789 Node &TargetN);
790
791 /// Remove an edge whose source is in this RefSCC and target is *not*.
792 ///
793 /// This removes an inter-RefSCC edge. All inter-RefSCC edges originating
794 /// from this SCC have been fully explored by any in-flight DFS graph
795 /// formation, so this is always safe to call once you have the source
796 /// RefSCC.
797 ///
798 /// This operation does not change the cyclic structure of the graph and so
799 /// is very inexpensive. It may change the connectivity graph of the SCCs
800 /// though, so be careful calling this while iterating over them.
801 void removeOutgoingEdge(Node &SourceN, Node &TargetN);
802
803 /// Remove a list of ref edges which are entirely within this RefSCC.
804 ///
805 /// Both the \a SourceN and all of the \a TargetNs must be within this
806 /// RefSCC. Removing these edges may break cycles that form this RefSCC and
807 /// thus this operation may change the RefSCC graph significantly. In
808 /// particular, this operation will re-form new RefSCCs based on the
809 /// remaining connectivity of the graph. The following invariants are
810 /// guaranteed to hold after calling this method:
811 ///
812 /// 1) If a ref-cycle remains after removal, it leaves this RefSCC intact
813 /// and in the graph. No new RefSCCs are built.
814 /// 2) Otherwise, this RefSCC will be dead after this call and no longer in
815 /// the graph or the postorder traversal of the call graph. Any iterator
816 /// pointing at this RefSCC will become invalid.
817 /// 3) All newly formed RefSCCs will be returned and the order of the
818 /// RefSCCs returned will be a valid postorder traversal of the new
819 /// RefSCCs.
820 /// 4) No RefSCC other than this RefSCC has its member set changed (this is
821 /// inherent in the definition of removing such an edge).
822 ///
823 /// These invariants are very important to ensure that we can build
824 /// optimization pipelines on top of the CGSCC pass manager which
825 /// intelligently update the RefSCC graph without invalidating other parts
826 /// of the RefSCC graph.
827 ///
828 /// Note that we provide no routine to remove a *call* edge. Instead, you
829 /// must first switch it to a ref edge using \c switchInternalEdgeToRef.
830 /// This split API is intentional as each of these two steps can invalidate
831 /// a different aspect of the graph structure and needs to have the
832 /// invalidation handled independently.
833 ///
834 /// The runtime complexity of this method is, in the worst case, O(V+E)
835 /// where V is the number of nodes in this RefSCC and E is the number of
836 /// edges leaving the nodes in this RefSCC. Note that E includes both edges
837 /// within this RefSCC and edges from this RefSCC to child RefSCCs. Some
838 /// effort has been made to minimize the overhead of common cases such as
839 /// self-edges and edge removals which result in a spanning tree with no
840 /// more cycles.
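///
/// A sketch of the expected calling pattern (illustrative only; `reenqueue`
/// is a hypothetical callback):
///
/// \code
///   SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
///       RC.removeInternalRefEdge(SourceN, {&TargetN});
///   for (LazyCallGraph::RefSCC *NewRC : NewRCs)
///     reenqueue(*NewRC); // Visit new RefSCCs in the returned postorder.
/// \endcode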
841 SmallVector<RefSCC *, 1> removeInternalRefEdge(Node &SourceN,
842 ArrayRef<Node *> TargetNs);
843
844 /// A convenience wrapper around the above to handle trivial cases of
845 /// inserting a new call edge.
846 ///
847 /// This is trivial whenever the target is in the same SCC as the source or
848 /// the edge is an outgoing edge to some descendant SCC. In these cases
849 /// there is no change to the cyclic structure of SCCs or RefSCCs.
850 ///
851 /// To further make calling this convenient, it also handles inserting
852 /// already existing edges.
853 void insertTrivialCallEdge(Node &SourceN, Node &TargetN);
854
855 /// A convenience wrapper around the above to handle trivial cases of
856 /// inserting a new ref edge.
857 ///
858 /// This is trivial whenever the target is in the same RefSCC as the source
859 /// or the edge is an outgoing edge to some descendant RefSCC. In these
860 /// cases there is no change to the cyclic structure of the RefSCCs.
861 ///
862 /// To further make calling this convenient, it also handles inserting
863 /// already existing edges.
864 void insertTrivialRefEdge(Node &SourceN, Node &TargetN);
865
866 /// Directly replace a node's function with a new function.
867 ///
868 /// This should be used when moving the body and users of a function to
869 /// a new formal function object but not otherwise changing the call graph
870 /// structure in any way.
871 ///
872 /// It requires that the old function in the provided node have zero uses
873 /// and the new function must have calls and references to it establishing
874 /// an equivalent graph.
875 void replaceNodeFunction(Node &N, Function &NewF);
876
877 ///@}
878 };
879
880 /// A post-order depth-first RefSCC iterator over the call graph.
881 ///
882 /// This iterator walks the cached post-order sequence of RefSCCs. However,
883 /// it trades stability for flexibility. It is restricted to a forward
884 /// iterator but will survive mutations which insert new RefSCCs and continue
885 /// to point to the same RefSCC even if it moves in the post-order sequence.
886 class postorder_ref_scc_iterator
887 : public iterator_facade_base<postorder_ref_scc_iterator,
888 std::forward_iterator_tag, RefSCC> {
889 friend class LazyCallGraph;
890 friend class LazyCallGraph::Node;
891
892 /// Nonce type to select the constructor for the end iterator.
893 struct IsAtEndT {};
894
895 LazyCallGraph *G;
896 RefSCC *RC = nullptr;
897
898 /// Build the begin iterator for a node.
899 postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {}
900
901 /// Build the end iterator for a node. This is selected purely by overload.
902 postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}
903
904 /// Get the post-order RefSCC at the given index of the postorder walk,
905 /// populating it if necessary.
906 static RefSCC *getRC(LazyCallGraph &G, int Index) {
907 if (Index == (int)G.PostOrderRefSCCs.size())
908 // We're at the end.
909 return nullptr;
910
911 return G.PostOrderRefSCCs[Index];
912 }
913
914 public:
915 bool operator==(const postorder_ref_scc_iterator &Arg) const {
916 return G == Arg.G && RC == Arg.RC;
917 }
918
919 reference operator*() const { return *RC; }
920
921 using iterator_facade_base::operator++;
922 postorder_ref_scc_iterator &operator++() {
923 assert(RC && "Cannot increment the end iterator!");
924 RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
925 return *this;
926 }
927 };
928
929 /// Construct a graph for the given module.
930 ///
931 /// This sets up the graph and computes all of the entry points of the graph.
932 /// No function definitions are scanned until their nodes in the graph are
933 /// requested during traversal.
934 LazyCallGraph(Module &M, TargetLibraryInfo &TLI);
935
936 LazyCallGraph(LazyCallGraph &&G);
937 LazyCallGraph &operator=(LazyCallGraph &&RHS);
938
939 EdgeSequence::iterator begin() { return EntryEdges.begin(); }
940 EdgeSequence::iterator end() { return EntryEdges.end(); }
941
942 void buildRefSCCs();
943
944 postorder_ref_scc_iterator postorder_ref_scc_begin() {
945 if (!EntryEdges.empty())
946 assert(!PostOrderRefSCCs.empty() &&
947 "Must form RefSCCs before iterating them!");
948 return postorder_ref_scc_iterator(*this);
949 }
950 postorder_ref_scc_iterator postorder_ref_scc_end() {
951 if (!EntryEdges.empty())
952 assert(!PostOrderRefSCCs.empty() &&
953 "Must form RefSCCs before iterating them!");
954 return postorder_ref_scc_iterator(*this,
955 postorder_ref_scc_iterator::IsAtEndT());
956 }
957
958 iterator_range<postorder_ref_scc_iterator> postorder_ref_sccs() {
959 return make_range(postorder_ref_scc_begin(), postorder_ref_scc_end());
960 }
961
962 /// Lookup a function in the graph which has already been scanned and added.
963 Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }
964
965 /// Lookup a function's SCC in the graph.
966 ///
967 /// \returns null if the function hasn't been assigned an SCC via the RefSCC
968 /// iterator walk.
969 SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
970
971 /// Lookup a function's RefSCC in the graph.
972 ///
973 /// \returns null if the function hasn't been assigned a RefSCC via the
974 /// RefSCC iterator walk.
975 RefSCC *lookupRefSCC(Node &N) const {
976 if (SCC *C = lookupSCC(N))
977 return &C->getOuterRefSCC();
978
979 return nullptr;
980 }
981
982 /// Get a graph node for a given function, scanning it to populate the graph
983 /// data as necessary.
984 Node &get(Function &F) {
985 Node *&N = NodeMap[&F];
986 if (N)
987 return *N;
988
989 return insertInto(F, N);
990 }
991
992 /// Get the sequence of known and defined library functions.
993 ///
994 /// These functions, because they are known to LLVM, can have calls
995 /// introduced out of thin air from arbitrary IR.
996 ArrayRef<Function *> getLibFunctions() const {
997 return LibFunctions.getArrayRef();
998 }
999
1000 /// Test whether a function is a known and defined library function tracked by
1001 /// the call graph.
1002 ///
1003 /// Because these functions are known to LLVM they are specially modeled in
1004 /// the call graph and even when all IR-level references have been removed
1005 /// remain active and reachable.
1006 bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }
1007
1008 ///@{
1009 /// \name Pre-SCC Mutation API
1010 ///
1011 /// These methods are only valid to call prior to forming any SCCs for this
1012 /// call graph. They can be used to update the core node-graph during
1013 /// a node-based inorder traversal that precedes any SCC-based traversal.
1014 ///
1015 /// Once you begin manipulating a call graph's SCCs, most mutation of the
1016 /// graph must be performed via a RefSCC method. There are some exceptions
1017 /// below.
1018
1019 /// Update the call graph after inserting a new edge.
1020 void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
1021
1022 /// Update the call graph after inserting a new edge.
1023 void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
1024 return insertEdge(get(Source), get(Target), EK);
1025 }
1026
1027 /// Update the call graph after deleting an edge.
1028 void removeEdge(Node &SourceN, Node &TargetN);
1029
1030 /// Update the call graph after deleting an edge.
1031 void removeEdge(Function &Source, Function &Target) {
1032 return removeEdge(get(Source), get(Target));
1033 }
1034
1035 ///@}
1036
1037 ///@{
1038 /// \name General Mutation API
1039 ///
1040 /// There are a very limited set of mutations allowed on the graph as a whole
1041 /// once SCCs have started to be formed. These routines have strict contracts
1042 /// but may be called at any point.
1043
1044 /// Remove a dead function from the call graph (typically to delete it).
1045 ///
1046 /// Note that the function must have an empty use list, and the call graph
1047 /// must be up-to-date prior to calling this. That means it is by itself in
1048 /// a maximal SCC which is by itself in a maximal RefSCC, etc. No structural
1049 /// changes result from calling this routine other than potentially removing
1050 /// entry points into the call graph.
1051 ///
1052 /// If SCC formation has begun, this function must not be part of the current
1053 /// DFS in order to call this safely. Typically, the function will have been
1054 /// fully visited by the DFS prior to calling this routine.
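///
/// A sketch of the expected deletion pattern (it assumes `DeadF` already
/// satisfies the preconditions above):
///
/// \code
///   CG.removeDeadFunction(DeadF);
///   DeadF.eraseFromParent(); // Only erase the IR after updating the graph.
/// \endcode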
1055 void removeDeadFunction(Function &F);
1056
1057 ///@}
1058
1059 ///@{
1060 /// \name Static helpers for code doing updates to the call graph.
1061 ///
1062 /// These helpers are used to implement parts of the call graph but are also
1063 /// useful to code doing updates or otherwise wanting to walk the IR in the
1064 /// same patterns as when we build the call graph.
1065
1066 /// Recursively visits the defined functions whose address is reachable from
1067 /// every constant in the \p Worklist.
1068 ///
1069 /// Doesn't recurse through any constants already in the \p Visited set, and
1070 /// updates that set with every constant visited.
1071 ///
1072 /// For each defined function, calls \p Callback with that function.
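///
/// For instance, the worklist can be seeded from a function's instruction
/// operands to discover referenced functions (a sketch; it assumes
/// llvm/IR/InstIterator.h for \c instructions and that `F` exists):
///
/// \code
///   SmallVector<Constant *, 16> Worklist;
///   SmallPtrSet<Constant *, 16> Visited;
///   for (Instruction &I : instructions(F))
///     for (Value *Op : I.operand_values())
///       if (auto *C = dyn_cast<Constant>(Op))
///         if (Visited.insert(C).second)
///           Worklist.push_back(C);
///   LazyCallGraph::visitReferences(Worklist, Visited, [](Function &Callee) {
///     // Callee's address is reachable from F's operands.
///   });
/// \endcode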
1073 template <typename CallbackT>
1074 static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
1075 SmallPtrSetImpl<Constant *> &Visited,
1076 CallbackT Callback) {
1077 while (!Worklist.empty()) {
1078 Constant *C = Worklist.pop_back_val();
1079
1080 if (Function *F = dyn_cast<Function>(C)) {
1081 if (!F->isDeclaration())
1082 Callback(*F);
1083 continue;
1084 }
1085
1086 if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
1087 // The blockaddress constant expression is a weird special case, we
1088 // can't generically walk its operands the way we do for all other
1089 // constants.
1090 if (Visited.insert(BA->getFunction()).second)
1091 Worklist.push_back(BA->getFunction());
1092 continue;
1093 }
1094
1095 for (Value *Op : C->operand_values())
1096 if (Visited.insert(cast<Constant>(Op)).second)
1097 Worklist.push_back(cast<Constant>(Op));
1098 }
1099 }
1100
1101 ///@}
1102
1103 private:
1104 using node_stack_iterator = SmallVectorImpl<Node *>::reverse_iterator;
1105 using node_stack_range = iterator_range<node_stack_iterator>;
1106
1107 /// Allocator that holds all the call graph nodes.
1108 SpecificBumpPtrAllocator<Node> BPA;
1109
1110 /// Maps function->node for fast lookup.
1111 DenseMap<const Function *, Node *> NodeMap;
1112
1113 /// The entry edges into the graph.
1114 ///
1115 /// These edges are from "external" sources. Put another way, they
1116 /// escape at the module scope.
1117 EdgeSequence EntryEdges;
1118
1119 /// Allocator that holds all the call graph SCCs.
1120 SpecificBumpPtrAllocator<SCC> SCCBPA;
1121
1122 /// Maps Function -> SCC for fast lookup.
1123 DenseMap<Node *, SCC *> SCCMap;
1124
1125 /// Allocator that holds all the call graph RefSCCs.
1126 SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;
1127
1128 /// The post-order sequence of RefSCCs.
1129 ///
1130 /// This list is lazily formed the first time we walk the graph.
1131 SmallVector<RefSCC *, 16> PostOrderRefSCCs;
1132
1133 /// A map from RefSCC to the index for it in the postorder sequence of
1134 /// RefSCCs.
1135 DenseMap<RefSCC *, int> RefSCCIndices;
1136
1137 /// Defined functions that are also known library functions which the
1138 /// optimizer can reason about and therefore might introduce calls to out of
1139 /// thin air.
1140 SmallSetVector<Function *, 4> LibFunctions;
1141
1142 /// Helper to insert a new function, with an already looked-up entry in
1143 /// the NodeMap.
1144 Node &insertInto(Function &F, Node *&MappedN);
1145
1146 /// Helper to update pointers back to the graph object during moves.
1147 void updateGraphPtrs();
1148
1149 /// Allocates an SCC and constructs it using the graph allocator.
1150 ///
1151 /// The arguments are forwarded to the constructor.
1152 template <typename... Ts> SCC *createSCC(Ts &&... Args) {
1153 return new (SCCBPA.Allocate()) SCC(std::forward<Ts>(Args)...);
1154 }
1155
1156 /// Allocates a RefSCC and constructs it using the graph allocator.
1157 ///
1158 /// The arguments are forwarded to the constructor.
1159 template <typename... Ts> RefSCC *createRefSCC(Ts &&... Args) {
1160 return new (RefSCCBPA.Allocate()) RefSCC(std::forward<Ts>(Args)...);
1161 }
1162
1163 /// Common logic for building SCCs from a sequence of roots.
1164 ///
1165 /// This is a very generic implementation of the depth-first walk and SCC
1166 /// formation algorithm. It uses a generic sequence of roots and generic
1167 /// callbacks for each step. This is designed to be used to implement both
1168 /// the RefSCC formation and SCC formation with shared logic.
1169 ///
1170 /// Currently this is a relatively naive implementation of Tarjan's DFS
1171 /// algorithm to form the SCCs.
1172 ///
1173 /// FIXME: We should consider newer variants such as Nuutila.
1174 template <typename RootsT, typename GetBeginT, typename GetEndT,
1175 typename GetNodeT, typename FormSCCCallbackT>
1176 static void buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
1177 GetEndT &&GetEnd, GetNodeT &&GetNode,
1178 FormSCCCallbackT &&FormSCC);
1179
1180 /// Build the SCCs for a RefSCC out of a list of nodes.
1181 void buildSCCs(RefSCC &RC, node_stack_range Nodes);
1182
1183 /// Get the index of a RefSCC within the postorder traversal.
1184 ///
1185 /// Requires that this RefSCC is a valid one in the (perhaps partial)
1186 /// postorder traversed part of the graph.
1187 int getRefSCCIndex(RefSCC &RC) {
1188 auto IndexIt = RefSCCIndices.find(&RC);
1189 assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
1190 assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
1191 "Index does not point back at RC!");
1192 return IndexIt->second;
1193 }
1194 };
1195
1196 inline LazyCallGraph::Edge::Edge() : Value() {}
1197 inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}
1198
1199 inline LazyCallGraph::Edge::operator bool() const {
1200 return Value.getPointer() && !Value.getPointer()->isDead();
1201 }
1202
1203 inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
1204 assert(*this && "Queried a null edge!");
1205 return Value.getInt();
1206 }
1207
1208 inline bool LazyCallGraph::Edge::isCall() const {
1209 assert(*this && "Queried a null edge!");
1210 return getKind() == Call;
1211 }
1212
1213 inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode() const {
1214 assert(*this && "Queried a null edge!");
1215 return *Value.getPointer();
1216 }
1217
1218 inline Function &LazyCallGraph::Edge::getFunction() const {
1219 assert(*this && "Queried a null edge!");
1220 return getNode().getFunction();
1221 }
1222
1223 // Provide GraphTraits specializations for call graphs.
1224 template <> struct GraphTraits<LazyCallGraph::Node *> {
1225 using NodeRef = LazyCallGraph::Node *;
1226 using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;
1227
1228 static NodeRef getEntryNode(NodeRef N) { return N; }
1229 static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
1230 static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
1231 };
1232 template <> struct GraphTraits<LazyCallGraph *> {
1233 using NodeRef = LazyCallGraph::Node *;
1234 using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;
1235
1236 static NodeRef getEntryNode(NodeRef N) { return N; }
1237 static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
1238 static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
1239 };
1240
1241 /// An analysis pass which computes the call graph for a module.
1242 class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
1243 friend AnalysisInfoMixin<LazyCallGraphAnalysis>;
1244
1245 static AnalysisKey Key;
1246
1247 public:
1248 /// Inform generic clients of the result type.
1249 using Result = LazyCallGraph;
1250
1251 /// Compute the \c LazyCallGraph for the module \c M.
1252 ///
1253 /// This just builds the set of entry points to the call graph. The rest is
1254 /// built lazily as it is walked.
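///
/// A typical consumer inside a module pass (a sketch; `MyPass` and the
/// surrounding pass boilerplate are assumptions):
///
/// \code
///   PreservedAnalyses MyPass::run(Module &M, ModuleAnalysisManager &AM) {
///     LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
///     CG.buildRefSCCs();
///     // ... walk CG.postorder_ref_sccs() ...
///     return PreservedAnalyses::all();
///   }
/// \endcode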
1255 LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
1256 return LazyCallGraph(M, AM.getResult<TargetLibraryAnalysis>(M));
1257 }
1258 };
1259
1260 /// A pass which prints the call graph to a \c raw_ostream.
1261 ///
1262 /// This is primarily useful for testing the analysis.
1263 class LazyCallGraphPrinterPass
1264 : public PassInfoMixin<LazyCallGraphPrinterPass> {
1265 raw_ostream &OS;
1266
1267 public:
1268 explicit LazyCallGraphPrinterPass(raw_ostream &OS);
1269
1270 PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
1271 };
1272
1273 /// A pass which prints the call graph as a DOT file to a \c raw_ostream.
1274 ///
1275 /// This is primarily useful for visualization purposes.
1276 class LazyCallGraphDOTPrinterPass
1277 : public PassInfoMixin<LazyCallGraphDOTPrinterPass> {
1278 raw_ostream &OS;
1279
1280 public:
1281 explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);
1282
1283 PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
1284 };
1285
1286 } // end namespace llvm
1287
1288 #endif // LLVM_ANALYSIS_LAZYCALLGRAPH_H
1289