use parking_lot::Mutex;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_data_structures::unord::UnordMap;
use rustc_index::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use smallvec::{smallvec, SmallVec};
use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};

#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

impl DepNodeIndex {
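    // The first two indices are reserved: `DepGraph::new` below pre-interns
    // the dependency-less anonymous node at index 0 and the always-red node
    // at index 1, and asserts that they received exactly these indices.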
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
    pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

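/// A frame in the parent-linked stack of nodes currently being marked.
/// The chain exists for diagnostics: it lets a failure deep inside recursive
/// marking report the path of dep-nodes that led to it.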
pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    #[inline]
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

pub struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph
    /// into the current one anymore, but we do reference shared data to
    /// save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: SerializedDepGraph<K>,

    colors: DepNodeColorMap,

    processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxIndexMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
}

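/// Hashes a query result with the stable hashing infrastructure, producing
/// the `Fingerprint` that red/green tracking compares across sessions. A
/// hedged sketch of the call pattern (the `hcx` and `result` bindings are
/// assumed to be in scope; not a runnable doctest):
///
/// ```ignore (illustrative)
/// let fingerprint: Fingerprint = hash_result(&mut hcx, &result);
/// ```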
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<K: DepKind> DepGraph<K> {
    pub fn new(
        profiler: &SelfProfilerRef,
        prev_graph: SerializedDepGraph<K>,
        prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            profiler,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
        );

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.intern_new_node(
            profiler,
            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
            smallvec![],
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
            profiler,
            &prev_graph,
            DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
            smallvec![],
            None,
            false,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        match red_node_prev_index_and_color {
            None => {
                // This is expected when we have no previous compilation session.
                assert!(prev_graph_node_count == 0);
            }
            Some((prev_red_node_index, DepNodeColor::Red)) => {
                assert_eq!(prev_red_node_index.as_usize(), red_node_index.as_usize());
                colors.insert(prev_red_node_index, DepNodeColor::Red);
            }
            Some((_, DepNodeColor::Green(_))) => {
                // There must be a logic error somewhere if we hit this branch.
                panic!("DepNodeIndex::FOREVER_RED_NODE evaluated to DepNodeColor::Green")
            }
        }

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                processed_side_effects: Default::default(),
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub fn data(&self) -> Option<&DepGraphData<K>> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNode`s are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// the nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
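    ///
    /// A hedged sketch of the intended call pattern (the `cache_decoder`
    /// binding and `Value::decode` call are assumptions for illustration;
    /// not a runnable doctest):
    ///
    /// ```ignore (illustrative)
    /// let value = tcx.dep_graph.with_query_deserialization(|| {
    ///     Value::decode(&mut cache_decoder)
    /// });
    /// ```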
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => data.with_anon_task(cx, dep_kind, op),
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<K: DepKind> DepGraphData<K> {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
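    ///
    /// A hedged sketch of the free-function convention (the `type_of_impl`
    /// provider and `dep_node` binding are assumptions for illustration; not
    /// a runnable doctest):
    ///
    /// ```ignore (illustrative)
    /// fn type_of_impl<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Ty<'tcx> {
    ///     // ... the query provider body ...
    /// }
    ///
    /// let (ty, dep_node_index) =
    ///     data.with_task(dep_node, tcx, def_id, type_of_impl, Some(hash_result));
    /// ```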
    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        assert!(
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\
                 - query-key: {arg:?}\n\
                 - dep-node: {key:?}"
        );

        let with_deps = |task_deps| K::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), smallvec![])
        } else {
            let task_deps = Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: SmallVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            });
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let hashing_timer = dcx.profiler().incr_result_hashing();
        let current_fingerprint =
            hash_result.map(|f| dcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, &result)));

        let print_status = cfg!(debug_assertions) && dcx.sess().opts.unstable_opts.dep_tasks;

        // Intern the new `DepNode`.
        let (dep_node_index, prev_and_color) = self.current.intern_node(
            dcx.profiler(),
            &self.previous,
            key,
            edges,
            current_fingerprint,
            print_status,
        );

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                self.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor \
                            insertion for {key:?}"
            );

            self.colors.insert(prev_index, color);
        }

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
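    ///
    /// A hedged sketch of the call pattern (the `dep_kind` value and closure
    /// body are assumptions for illustration; not a runnable doctest):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     data.with_anon_task(tcx, dep_kind, || compute_something(tcx));
    /// ```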
    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        let task_deps = Lock::new(TaskDeps::default());
        let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let task_deps = task_deps.reads;

        let dep_node_index = match task_deps.len() {
            0 => {
                // Because the dep-node id of anon nodes is computed from the sets of its
                // dependencies we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                task_deps[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is unique
                // per session by combining it with the per-session random number `anon_id_seed`.
                // This hash only needs to map the dependencies to a single value on a per-session
                // basis.
                let mut hasher = StableHasher::new();
                task_deps.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                self.current.intern_new_node(
                    cx.profiler(),
                    target_dep_node,
                    task_deps,
                    Fingerprint::ZERO,
                )
            }
        };

        (result, dep_node_index)
    }
}

impl<K: DepKind> DepGraph<K> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        panic!("Illegal read of: {dep_node_index:?}")
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap
                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            })
        }
    }

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling
    /// query depends only on the list of dependencies recorded up to now. As a
    /// consequence, the value that this query gives us can only depend on those
    /// dependencies too. Therefore, it is sound to use the current dependency set
    /// for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
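    ///
    /// A hedged sketch of the call pattern (the `node`, `key`, and `value`
    /// bindings are assumptions for illustration; not a runnable doctest):
    ///
    /// ```ignore (illustrative)
    /// let dep_node_index =
    ///     tcx.dep_graph.with_feed_task(node, tcx, key, &value, Some(hash_result));
    /// ```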
    pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
        &self,
        node: DepNode<K>,
        cx: Ctxt,
        key: A,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.current.prev_index_to_index.lock()[prev_index];
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{:?}", value),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = SmallVec::new();
            K::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            let hashing_timer = cx.profiler().incr_result_hashing();
            let current_fingerprint = hash_result.map(|hash_result| {
                cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
            });

            let print_status = cfg!(debug_assertions) && cx.sess().opts.unstable_opts.dep_tasks;

            // Intern the new `DepNode` with the dependencies up-to-now.
            let (dep_node_index, prev_and_color) = data.current.intern_node(
                cx.profiler(),
                &data.previous,
                node,
                edges,
                current_fingerprint,
                print_status,
            );

            hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

            if let Some((prev_index, color)) = prev_and_color {
                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor insertion for {key:?}",
                );

                data.colors.insert(prev_index, color);
            }

            dep_node_index
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}

impl<K: DepKind> DepGraphData<K> {
    #[inline]
    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.current.prev_index_to_index.lock()[prev_index]
        } else {
            self.current
                .new_node_to_index
                .get_shard_by_value(dep_node)
                .lock()
                .get(dep_node)
                .copied()
        }
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.dep_node_index_of_opt(dep_node).is_some()
    }

    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            None
        }
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    #[inline]
    pub fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        self.colors.get(prev_index).is_some_and(|c| c.is_green())
    }

    #[inline]
    pub fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode<K> {
        self.previous.index_to_node(prev_index)
    }

    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }
}

impl<K: DepKind> DepGraph<K> {
    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxIndexMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        None
    }

    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<K: DepKind> DepGraphData<K> {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can
    /// mark it green. This function will mark the current task as a reader of the
    /// specified node when a node index can be found for that node.
    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        match self.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, prev_index, &dep_node, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<()> {
        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!("dependency {dep_dep_node:?} was immediately green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {dep_dep_node:?} was immediately red");
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, frame) {
            // The DepNode could not be forced.
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        let dep_dep_node_color = self.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            None => {}
        }

        if let None = qcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        return None;
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
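        // Push a new frame onto the parent-linked marking stack so that
        // failures further down can report the chain of nodes being marked.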
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(self.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, dep_node, Some(&frame))?;
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph
        let dep_node_index = self.current.promote_node_and_deps_to_current(
            qcx.dep_context().profiler(),
            &self.previous,
            prev_dep_node_index,
        );

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let side_effects = qcx.load_side_effects(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            self.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
                      insertion for {dep_node:?}"
        );

        if !side_effects.is_empty() {
            qcx.dep_context().dep_graph().with_query_deserialization(|| {
                self.emit_side_effects(qcx, dep_node_index, side_effects)
            });
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("successfully marked {dep_node:?} as green");
        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        dep_node_index: DepNodeIndex,
        side_effects: QuerySideEffects,
    ) {
        let mut processed = self.processed_side_effects.lock();

        if processed.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must process side effects

            // Promote the previous diagnostics to the current session.
            qcx.store_side_effects(dep_node_index, side_effects.clone());

            let handle = qcx.dep_context().sess().diagnostic();

            for mut diagnostic in side_effects.diagnostics {
                handle.emit_diagnostic(&mut diagnostic);
            }
        }
    }
}

impl<K: DepKind> DepGraph<K> {
    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).is_some_and(|c| c.is_green())
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory, but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),
            )
        }
    }

    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)
        } else {
            Ok(0)
        }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary examples are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
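///
/// A hedged sketch of what such a record might look like (the CGU name and
/// file names are made-up values for illustration; not a runnable doctest):
///
/// ```ignore (illustrative)
/// let mut saved_files = UnordMap::default();
/// saved_files.insert("o".to_string(), "example_cgu.o".to_string());
/// saved_files.insert("dwo".to_string(), "example_cgu.dwo".to_string());
/// let work_product = WorkProduct { cgu_name: "example_cgu".to_string(), saved_files };
/// ```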
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: UnordMap<String, String>,
}

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
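///
/// In sketch form, the two-step lookup described above (this mirrors
/// `DepGraphData::dep_node_index_of_opt` earlier in this file; not a
/// runnable doctest):
///
/// ```ignore (illustrative)
/// let index: Option<DepNodeIndex> = match previous.node_to_index_opt(&dep_node) {
///     // Node existed in the previous session: go through the compact vector.
///     Some(prev_index) => prev_index_to_index.lock()[prev_index],
///     // Node is new in this session: look it up in the sharded map.
///     None => new_node_to_index.get_shard_by_value(&dep_node).lock().get(&dep_node).copied(),
/// };
/// ```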
pub(super) struct CurrentDepGraph<K: DepKind> {
    encoder: Steal<GraphEncoder<K>>,
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: the color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,

    /// The cached event id for profiling node interning. This saves us
    /// from having to look up the event id every time we intern a node,
    /// which may incur too much overhead.
    /// This will be None if self-profiling is disabled.
    node_intern_event_id: Option<EventId>,
}

impl<K: DepKind> CurrentDepGraph<K> {
    fn new(
        profiler: &SelfProfilerRef,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

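        // Reserve ~2% headroom over the previous session's node count, plus a
        // small constant so that very small graphs still get a useful initial
        // capacity.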
        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let node_intern_event_id = profiler
            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
            .map(EventId::from_label);

        CurrentDepGraph {
            encoder: Steal::new(GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
            )),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
            node_intern_event_id,
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    #[inline(always)]
    fn intern_new_node(
        &self,
        profiler: &SelfProfilerRef,
        key: DepNode<K>,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
        {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                dep_node_index
            }
        };

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        dep_node_index
    }

    fn intern_node(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        key: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
        print_status: bool,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        // Get timer for profiling `DepNode` interning
        let _node_intern_timer =
            self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            let get_dep_node_index = |color, fingerprint| {
                if print_status {
                    eprintln!("[task::{color:}] {key:?}");
                }

                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index =
                            self.encoder.borrow().send(profiler, key, fingerprint, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);

                dep_node_index
            };

            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let dep_node_index = get_dep_node_index("green", fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let dep_node_index = get_dep_node_index("red", fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let dep_node_index = get_dep_node_index("unknown", Fingerprint::ZERO);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            if print_status {
                eprintln!("[task::new] {key:?}");
            }

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);

            (dep_node_index, None)
        }
    }

    fn promote_node_and_deps_to_current(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let key = prev_graph.index_to_node(prev_index);
                let edges = prev_graph
                    .edge_targets_from(prev_index)
                    .iter()
                    .map(|i| prev_index_to_index[*i].unwrap())
                    .collect();
                let fingerprint = prev_graph.fingerprint_by_index(prev_index);
                let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}
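
// Minimal sketch of the red/green decision made in `intern_node` above (not
// part of the original source): a node that existed in the previous session
// is green iff its recomputed fingerprint is known and matches the previous
// one; `no_hash` queries (fingerprint `None`) are conservatively treated as
// red. The helper is a hypothetical reduction to a pure function.
#[cfg(test)]
mod color_decision_sketch {
    use rustc_data_structures::fingerprint::Fingerprint;

    // Hypothetical helper mirroring the branch structure of `intern_node`.
    fn is_green(prev: Fingerprint, current: Option<Fingerprint>) -> bool {
        current == Some(prev)
    }

    #[test]
    fn no_hash_queries_count_as_red() {
        let prev = Fingerprint::new(42, 42);
        assert!(is_green(prev, Some(prev)));
        assert!(!is_green(prev, Some(Fingerprint::new(0, 0))));
        assert!(!is_green(prev, None)); // `no_hash`: equality can't be proven
    }
}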

/// The capacity of the `reads` field `SmallVec`
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
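
// Illustrative sketch (not part of the original source): a `SmallVec` with
// inline capacity 8, like `EdgesVec`, only touches the heap once a ninth
// element is pushed, which keeps dependency tracking allocation-free for the
// common case of tasks with few reads.
#[cfg(test)]
mod edges_vec_sketch {
    use smallvec::SmallVec;

    #[test]
    fn stays_inline_up_to_the_cap() {
        let mut v: SmallVec<[u32; 8]> = SmallVec::new();
        for i in 0..8 {
            v.push(i);
        }
        assert!(!v.spilled()); // all 8 elements still live inline
        v.push(8);
        assert!(v.spilled()); // the ninth push moves storage to the heap
    }
}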

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a, K: DepKind> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier)
    Allow(&'a Lock<TaskDeps<K>>),
    /// This is used when executing an `eval_always` query. We don't
    /// need to track dependencies for a query that's always
    /// re-executed -- but we need to know that this is an `eval_always`
    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
    /// when directly feeding other queries.
    EvalAlways,
    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}
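
// Minimal sketch of how a read might dispatch over the four tracking modes
// above (hypothetical and simplified, not part of the original source): only
// `Allow` records the dependency, `EvalAlways` and `Ignore` drop it, and
// `Forbid` aborts.
#[cfg(test)]
mod task_deps_mode_sketch {
    // Hypothetical miniature of `TaskDepsRef` over plain `u32` node indices.
    enum Mode<'a> {
        Allow(&'a mut Vec<u32>),
        EvalAlways,
        Ignore,
        Forbid,
    }

    fn record_read(mode: &mut Mode<'_>, dep: u32) {
        match mode {
            Mode::Allow(reads) => reads.push(dep),
            Mode::EvalAlways | Mode::Ignore => {}
            Mode::Forbid => panic!("tried to record a dependency while decoding"),
        }
    }

    #[test]
    fn only_allow_records() {
        let mut reads = Vec::new();
        record_read(&mut Mode::Allow(&mut reads), 3);
        record_read(&mut Mode::Ignore, 4);
        assert_eq!(reads, [3]);
    }
}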

#[derive(Debug)]
pub struct TaskDeps<K: DepKind> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K: DepKind> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

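// Illustrative sketch (not part of the original source) of why `TaskDeps`
// pairs an ordered `reads` list with a `read_set`: the set gives O(1)
// duplicate detection while the vector preserves the order in which
// dependencies were first read. Plain `u32`s stand in for `DepNodeIndex`.
#[cfg(test)]
mod task_deps_dedup_sketch {
    use std::collections::HashSet;

    // Hypothetical miniature of a record-a-read path.
    fn record(reads: &mut Vec<u32>, read_set: &mut HashSet<u32>, dep: u32) {
        if read_set.insert(dep) {
            reads.push(dep); // first occurrence only
        }
    }

    #[test]
    fn duplicates_are_dropped_but_order_is_kept() {
        let (mut reads, mut set) = (Vec::new(), HashSet::new());
        for dep in [5u32, 3, 5, 9, 3] {
            record(&mut reads, &mut set, dep);
        }
        assert_eq!(reads, [5, 3, 9]);
    }
}
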
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    #[inline]
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}
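
// Illustrative round-trip of the compressed encoding above (not part of the
// original source): `None` maps to 0, `Red` to 1, and `Green(i)` to `i + 2`,
// so every green index (including 0) fits in the remaining `u32` range. The
// mirror below uses plain `u32`s instead of `DepNodeIndex`.
#[cfg(test)]
mod color_encoding_sketch {
    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Color {
        Red,
        Green(u32),
    }

    fn encode(color: Option<Color>) -> u32 {
        match color {
            None => 0,                      // COMPRESSED_NONE
            Some(Color::Red) => 1,          // COMPRESSED_RED
            Some(Color::Green(i)) => i + 2, // COMPRESSED_FIRST_GREEN + i
        }
    }

    fn decode(value: u32) -> Option<Color> {
        match value {
            0 => None,
            1 => Some(Color::Red),
            v => Some(Color::Green(v - 2)),
        }
    }

    #[test]
    fn round_trips() {
        for color in [None, Some(Color::Red), Some(Color::Green(0)), Some(Color::Green(7))] {
            assert_eq!(decode(encode(color)), color);
        }
    }
}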

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<K: DepKind>(
    graph: &DepGraph<K>,
    frame: Option<&MarkFrame<'_>>,
) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = frame;
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {:?}", node);
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}
1443