//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
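//!
//! Very roughly, a generated query accessor on `tcx` ends up calling into
//! [`get_query_incr`] or [`get_query_non_incr`] below, depending on whether the
//! dependency graph is enabled. An illustrative sketch (the names `my_query`,
//! `Tcx`, `Key`, `Value` and `query_config` are hypothetical, not the exact
//! generated code):
//!
//! ```ignore (illustrative)
//! fn my_query(tcx: Tcx, key: Key, span: Span) -> Value {
//!     let query_config = /* the `QueryConfig` instance describing `my_query` */;
//!     if tcx.dep_graph().is_fully_enabled() {
//!         // Incremental mode: may reuse a green result or re-execute the provider.
//!         get_query_incr(query_config, tcx, span, key, QueryMode::Get).unwrap()
//!     } else {
//!         // Non-incremental mode: execute the provider (or hit the in-memory cache).
//!         get_query_non_incr(query_config, tcx, span, key)
//!     }
//! }
//! ```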

use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
use crate::dep_graph::{DepGraphData, HasDepContext};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
#[cfg(parallel_compiler)]
use crate::query::job::QueryLatch;
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::SerializedDepNodeIndex;
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lock;
#[cfg(parallel_compiler)]
use rustc_data_structures::{cold_path, sharded::Sharded};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use thin_vec::ThinVec;

use super::QueryConfig;

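/// Tracks the in-flight (or poisoned) jobs for one kind of query, keyed by query key.
/// `try_execute_query` consults this map to decide whether to start a new job,
/// report a cycle, or (with the parallel compiler) wait on another thread's job.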
pub struct QueryState<K, D: DepKind> {
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult<D>>>,
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult<D>>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult<D: DepKind> {
    /// An already executing query. The query job can be used to await for its completion.
    Started(QueryJob<D>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    /// silently panic.
    Poisoned,
}

impl<K, D> QueryState<K, D>
where
    K: Eq + Hash + Copy + Debug,
    D: DepKind,
{
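    /// Returns `true` if no queries tracked by this state are currently
    /// executing, i.e. the `active` map is empty (across all shards).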
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

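    /// Collects the currently running jobs into `jobs`, building their
    /// `QueryStackFrame`s with `make_query`. Returns `None` if the `active`
    /// map could not be locked without blocking (we are called from the
    /// deadlock handler).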
    pub fn try_collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
        jobs: &mut QueryMap<D>,
    ) -> Option<()> {
        let mut active = Vec::new();

        #[cfg(parallel_compiler)]
        {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        active.push((*k, job.clone()));
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use try_lock here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    active.push((*k, job.clone()));
                }
            }
        }

        // Call `make_query` while we're not holding a `self.active` lock as `make_query` may call
        // queries leading to a deadlock.
        for (key, job) in active {
            let query = make_query(qcx, key);
            jobs.insert(job.id, QueryJobInfo { query, job });
        }

        Some(())
    }
}

impl<K, D: DepKind> Default for QueryState<K, D> {
    fn default() -> QueryState<K, D> {
        QueryState { active: Default::default() }
    }
}

/// A type representing the responsibility to execute the query job for the
/// `key` field. This will poison the relevant query if dropped.
struct JobOwner<'tcx, K, D: DepKind>
where
    K: Eq + Hash + Copy,
{
    state: &'tcx QueryState<K, D>,
    key: K,
}

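/// Reports a query cycle as a diagnostic and produces the query's designated
/// cycle-error value.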
#[cold]
#[inline(never)]
fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError<Qcx::DepKind>) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
    handle_cycle_error(query, qcx, &cycle_error, error)
}

fn handle_cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: &CycleError<Qcx::DepKind>,
    mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    use HandleCycleError::*;
    match query.handle_cycle_error() {
        Error => {
            error.emit();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
        Fatal => {
            error.emit();
            qcx.dep_context().sess().abort_if_errors();
            unreachable!()
        }
        DelayBug => {
            error.delay_as_bug();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
    }
}

impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
{
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter, and forgets the `JobOwner` so it won't poison the query.
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
    where
        C: QueryCache<Key = K>,
    {
        let key = self.key;
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        // Mark as complete before we remove the job from the active state
        // so no other thread can re-execute this query.
        cache.complete(key, result, dep_node_index);

        let job = {
            #[cfg(parallel_compiler)]
            let mut lock = state.active.get_shard_by_value(&key).lock();
            #[cfg(not(parallel_compiler))]
            let mut lock = state.active.lock();
            match lock.remove(&key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            }
        };

        job.signal_complete();
    }
}

impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
    D: DepKind,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key, QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

#[derive(Clone)]
pub(crate) struct CycleError<D: DepKind> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame<D>)>,
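    /// The queries and spans that make up the cycle itself.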
    pub cycle: Vec<QueryInfo<D>>,
}

/// Checks if the query is already computed and in the cache.
/// If so, registers a dependency-graph read of the cached entry's `DepNodeIndex`
/// and returns the cached value; otherwise returns `None` so the caller can go
/// on to compute the query.
#[inline(always)]
pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
where
    C: QueryCache,
    Tcx: DepContext,
{
    match cache.lookup(&key) {
        Some((value, index)) => {
            tcx.profiler().query_cache_hit(index.into());
            tcx.dep_graph().read_index(index);
            Some(value)
        }
        None => None,
    }
}

#[cold]
#[inline(never)]
#[cfg(not(parallel_compiler))]
fn cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    try_execute: QueryJobId,
    span: Span,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = try_execute.find_cycle_in_stack(
        qcx.try_collect_active_jobs().unwrap(),
        &qcx.current_query_job(),
        span,
    );
    (mk_cycle(query, qcx, error), None)
}

#[inline(always)]
#[cfg(parallel_compiler)]
fn wait_for_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    latch: QueryLatch<Qcx::DepKind>,
    current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // For parallel queries, we'll block and wait until the query running
    // in another thread has completed. Record how long we wait in the
    // self-profiler.
    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();

    // With parallel queries we might just have to wait on some other
    // thread.
    let result = latch.wait_on(current, span);

    match result {
        Ok(()) => {
            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
                cold_path(|| panic!("value must be in cache after waiting"))
            };

            qcx.dep_context().profiler().query_cache_hit(index.into());
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
        Err(cycle) => (mk_cycle(query, qcx, cycle), None),
    }
}

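/// Looks up the query in its `QueryState` and either starts a new job for it,
/// reports a cycle error (in the single-threaded compiler a re-entered key is
/// always a cycle), or waits for the job already running on another thread
/// (parallel compiler only).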
#[inline(never)]
fn try_execute_query<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let state = query.query_state(qcx);
    #[cfg(parallel_compiler)]
    let mut state_lock = state.active.get_shard_by_value(&key).lock();
    #[cfg(not(parallel_compiler))]
    let mut state_lock = state.active.lock();

    // For the parallel compiler we need to check both the query cache and query state structures
    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
    // query is not still executing. Without checking the query cache here, we can end up
    // re-executing the query since `try_start` only checks that the query is not currently
    // executing, but another thread may have already completed the query and stored its result
    // in the query cache.
    if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = qcx.current_query_job();

    match state_lock.entry(key) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query, so we start a new job and insert it in the
            // state map.
            let id = qcx.next_job_id();
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert(QueryResult::Started(job));

            // Drop the lock before we start executing the query
            drop(state_lock);

            execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match entry.get_mut() {
                #[cfg(not(parallel_compiler))]
                QueryResult::Started(job) => {
                    let id = job.id;
                    drop(state_lock);

                    // If we are single-threaded we know that we have a cycle error,
                    // so we just return the error.
                    cycle_error(query, qcx, id, span)
                }
                #[cfg(parallel_compiler)]
                QueryResult::Started(job) => {
                    // Get the latch out
                    let latch = job.latch();
                    drop(state_lock);

                    wait_for_query(query, qcx, span, key, latch, current_job_id)
                }
                QueryResult::Poisoned => FatalError.raise(),
            }
        }
    }
}

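/// Executes the query once we own its job: dispatches to the incremental or
/// non-incremental path, cross-checks the computed value against a fed value
/// for feedable queries, writes the result into the query cache, and signals
/// any waiters via `JobOwner::complete`.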
#[inline(always)]
fn execute_job<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    state: &QueryState<Q::Key, Qcx::DepKind>,
    key: Q::Key,
    id: QueryJobId,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Use `JobOwner` so the query will be poisoned if executing it panics.
    let job_owner = JobOwner { state, key };

    debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);

    let (result, dep_node_index) = if INCR {
        execute_job_incr(
            query,
            qcx,
            qcx.dep_context().dep_graph().data().unwrap(),
            key,
            dep_node,
            id,
        )
    } else {
        execute_job_non_incr(query, qcx, key, id)
    };

    let cache = query.query_cache(qcx);
    if query.feedable() {
        // We should not compute queries that also got a value via feeding.
        // This can't happen, as query feeding adds the same dependencies to the fed query
        // as its feeding query had. So if the fed query is red, so is its feeder, which will
        // get evaluated first, and re-feed the query.
        if let Some((cached_result, _)) = cache.lookup(&key) {
            let Some(hasher) = query.hash_result() else {
                panic!(
                    "no_hash fed query later has its value computed.\n\
                    Remove `no_hash` modifier to allow recomputation.\n\
                    The already cached value: {}",
                    (query.format_value())(&cached_result)
                );
            };

            let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
            });
            let formatter = query.format_value();
            if old_hash != new_hash {
                // We have an inconsistency. This can happen if one of the two
                // results is tainted by errors. In this case, delay a bug to
                // ensure compilation is doomed.
                qcx.dep_context().sess().delay_span_bug(
                    DUMMY_SP,
                    format!(
                        "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
                        computed={:#?}\nfed={:#?}",
                        query.dep_kind(),
                        key,
                        formatter(&result),
                        formatter(&cached_result),
                    ),
                );
            }
        }
    }
    job_owner.complete(cache, result, dep_node_index);

    (result, Some(dep_node_index))
}

// Fast path for when incr. comp. is off.
#[inline(always)]
fn execute_job_non_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    // Fingerprint the key, just to assert that it doesn't
    // have anything we don't consider hashable
    if cfg!(debug_assertions) {
        let _ = key.to_fingerprint(*qcx.dep_context());
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Similarly, fingerprint the result to assert that
    // it doesn't have anything not considered hashable.
    if cfg!(debug_assertions) && let Some(hash_result) = query.hash_result() {
        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
            hash_result(&mut hcx, &result);
        });
    }

    (result, dep_node_index)
}

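// Path used when incremental compilation is enabled: first try to mark the dep node
// green and reuse a cached result, otherwise run the provider inside a dep-graph task
// and record any emitted diagnostics as side effects.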
#[inline(always)]
fn execute_job_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if !query.anon() && !query.eval_always() {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = qcx.start_query(job_id, false, None, || {
            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) =
        qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
            if query.anon() {
                return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
                    query.compute(qcx, key)
                });
            }

            // `to_dep_node` is expensive for some `DepKind`s.
            let dep_node =
                dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));

            dep_graph_data.with_task(
                dep_node,
                (qcx, query),
                key,
                |(qcx, query), key| query.compute(qcx, key),
                query.hash_result(),
            )
        });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if std::intrinsics::unlikely(!side_effects.is_empty()) {
        if query.anon() {
            qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            qcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}

#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
    query: Q,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    qcx: Qcx,
    key: &Q::Key,
    dep_node: &DepNode<Qcx::DepKind>,
) -> Option<(Q::Value, DepNodeIndex)>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Note this function can be called concurrently from the same query
    // We must ensure that this is handled correctly.

    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;

    debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
        if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
        }

        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
        // If `-Zincremental-verify-ich` is specified, re-hash results from
        // the cache and make sure that they have the expected fingerprint.
        //
        // If not, we still seek to verify a subset of fingerprints loaded
        // from disk. Re-hashing results is fairly expensive, so we can't
        // currently afford to verify every hash. This subset should still
        // give us some coverage of potential bugs though.
        let try_verify = prev_fingerprint.split().1.as_u64() % 32 == 0;
        if std::intrinsics::unlikely(
            try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
        ) {
            incremental_verify_ich(
                *qcx.dep_context(),
                dep_graph_data,
                &result,
                prev_dep_node_index,
                query.hash_result(),
                query.format_value(),
            );
        }

        return Some((result, dep_node_index));
    }

    // We always expect to find a cached result for things that
    // can be forced from `DepNode`.
    debug_assert!(
        !query.cache_on_disk(*qcx.dep_context(), key)
            || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
        "missing on-disk cache entry for {dep_node:?}"
    );

    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
    // we should actually be able to load it.
    debug_assert!(
        !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
        "missing on-disk cache entry for loadable {dep_node:?}"
    );

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = qcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could end up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check.
    incremental_verify_ich(
        *qcx.dep_context(),
        dep_graph_data,
        &result,
        prev_dep_node_index,
        query.hash_result(),
        query.format_value(),
    );

    Some((result, dep_node_index))
}

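/// Verifies that a result (loaded from the on-disk cache or just recomputed)
/// hashes to the fingerprint recorded for this dep node in the previous
/// session, and reports unstable fingerprints otherwise.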
#[inline]
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
    tcx: Tcx,
    dep_graph_data: &DepGraphData<Tcx::DepKind>,
    result: &V,
    prev_index: SerializedDepNodeIndex,
    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
    format_value: fn(&V) -> String,
) where
    Tcx: DepContext,
{
    if !dep_graph_data.is_index_green(prev_index) {
        incremental_verify_ich_not_green(tcx, prev_index)
    }

    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });

    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);

    if new_hash != old_hash {
        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(&result));
    }
}

#[cold]
#[inline(never)]
fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
where
    Tcx: DepContext,
{
    panic!(
        "fingerprint for green query instance not loaded from cache: {:?}",
        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
    )
}

// Note that this is marked #[cold] and intentionally takes a `&dyn Fn() -> String` for
// `result`, as we want to avoid generating a bunch of different implementations for
// LLVM to chew on (and filling up the final binary, too).
#[cold]
#[inline(never)]
fn incremental_verify_ich_failed<Tcx>(
    tcx: Tcx,
    prev_index: SerializedDepNodeIndex,
    result: &dyn Fn() -> String,
) where
    Tcx: DepContext,
{
    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a reentrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        tcx.sess().emit_err(crate::error::Reentrant);
    } else {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {crate_name}` or `cargo clean`")
        } else {
            "`cargo clean`".to_string()
        };

        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
        tcx.sess().emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{dep_node:?}"),
        });
        panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}

/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns `true` if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: &Q::Key,
    check_cache: bool,
) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if query.eval_always() {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon());

    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);

    let dep_graph = qcx.dep_context().dep_graph();
    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            return (true, Some(dep_node));
        }
        Some((serialized_dep_node_index, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            serialized_dep_node_index
        }
    };

    // We do not need the value at all, so do not check the cache.
    if !check_cache {
        return (false, None);
    }

    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
    (!loadable, Some(dep_node))
}

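/// How a query is requested: `Get` computes (or looks up) the value, while
/// `Ensure` only guarantees the query has been executed or that its node is
/// green, optionally checking that a cached value is loadable from disk.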
#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}

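/// Entry point for running a query when the dependency graph is disabled
/// (i.e. incremental compilation is off).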
#[inline(always)]
pub fn get_query_non_incr<Q, Qcx>(query: Q, qcx: Qcx, span: Span, key: Q::Key) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    ensure_sufficient_stack(|| try_execute_query::<Q, Qcx, false>(query, qcx, span, key, None).0)
}

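/// Entry point for running a query when the dependency graph is enabled.
/// In `QueryMode::Ensure`, returns `None` when the query does not need to run
/// (see `ensure_must_run`); otherwise executes the query and registers a read
/// of the resulting dep node.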
#[inline(always)]
pub fn get_query_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    mode: QueryMode,
) -> Option<Q::Value>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());

    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, span, key, dep_node)
    });
    if let Some(dep_node_index) = dep_node_index {
        qcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}

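/// Forces the query for `dep_node` to execute, unless its value is already in
/// the in-memory cache. This is used to re-execute ("force") queries on behalf
/// of the dependency graph, e.g. when a node could not be marked green.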
pub fn force_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
) where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.
    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
        qcx.dep_context().profiler().query_cache_hit(index.into());
        return;
    }

    debug_assert!(!query.anon());

    ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
    });
}