Skip to content

Commit bc93a15

Browse files
committed
Auto merge of #154076 - nnethercote:DepNode-improvements, r=<try>
`DepNode` use improvements
2 parents fd0c901 + de1e5b3 commit bc93a15

1 file changed

Lines changed: 38 additions & 74 deletions

File tree

compiler/rustc_query_impl/src/execution.rs

Lines changed: 38 additions & 74 deletions
Original file line number · Diff line number · Diff line change
@@ -256,14 +256,12 @@ fn wait_for_query<'tcx, C: QueryCache>(
256256

257257
/// Shared main part of both [`execute_query_incr_inner`] and [`execute_query_non_incr_inner`].
258258
#[inline(never)]
259-
fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>(
259+
fn try_execute_query<'tcx, C: QueryCache>(
260260
query: &'tcx QueryVTable<'tcx, C>,
261261
tcx: TyCtxt<'tcx>,
262262
span: Span,
263263
key: C::Key,
264-
// If present, some previous step has already created a `DepNode` for this
265-
// query+key, which we should reuse instead of creating a new one.
266-
dep_node: Option<DepNode>,
264+
dep_node: Option<DepNode>, // `None` for non-incremental, `Some` for incremental
267265
) -> (C::Value, Option<DepNodeIndex>) {
268266
let key_hash = sharded::make_hash(&key);
269267
let mut state_lock = query.state.active.lock_shard_by_hash(key_hash);
@@ -298,10 +296,8 @@ fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>(
298296
// panic occurs while executing the query (or any intermediate plumbing).
299297
let job_guard = ActiveJobGuard { state: &query.state, key, key_hash };
300298

301-
debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR);
302-
303299
// Delegate to another function to actually execute the query job.
304-
let (value, dep_node_index) = if INCR {
300+
let (value, dep_node_index) = if let Some(dep_node) = dep_node {
305301
execute_job_incr(query, tcx, key, dep_node, id)
306302
} else {
307303
execute_job_non_incr(query, tcx, key, id)
@@ -418,27 +414,23 @@ fn execute_job_incr<'tcx, C: QueryCache>(
418414
query: &'tcx QueryVTable<'tcx, C>,
419415
tcx: TyCtxt<'tcx>,
420416
key: C::Key,
421-
mut dep_node_opt: Option<DepNode>,
417+
dep_node: DepNode,
422418
job_id: QueryJobId,
423419
) -> (C::Value, DepNodeIndex) {
424420
let dep_graph_data =
425421
tcx.dep_graph.data().expect("should always be present in incremental mode");
426422

427423
if !query.anon && !query.eval_always {
428-
// `to_dep_node` is expensive for some `DepKind`s.
429-
let dep_node =
430-
dep_node_opt.get_or_insert_with(|| DepNode::construct(tcx, query.dep_kind, &key));
431-
432424
// The diagnostics for this query will be promoted to the current session during
433425
// `try_mark_green()`, so we can ignore them here.
434426
if let Some(ret) = start_query(job_id, false, || try {
435-
let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, dep_node)?;
427+
let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, &dep_node)?;
436428
let value = load_from_disk_or_invoke_provider_green(
437429
tcx,
438430
dep_graph_data,
439431
query,
440432
key,
441-
dep_node,
433+
&dep_node,
442434
prev_index,
443435
dep_node_index,
444436
);
@@ -458,10 +450,6 @@ fn execute_job_incr<'tcx, C: QueryCache>(
458450
});
459451
}
460452

461-
// `to_dep_node` is expensive for some `DepKind`s.
462-
let dep_node =
463-
dep_node_opt.unwrap_or_else(|| DepNode::construct(tcx, query.dep_kind, &key));
464-
465453
// Call the query provider.
466454
dep_graph_data.with_task(
467455
dep_node,
@@ -573,69 +561,56 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>(
573561
value
574562
}
575563

576-
/// Return value struct for [`check_if_ensure_can_skip_execution`].
577-
struct EnsureCanSkip {
578-
/// If true, the current `tcx.ensure_ok()` or `tcx.ensure_done()` query
579-
/// can return early without actually trying to execute.
580-
skip_execution: bool,
581-
/// A dep node that was prepared while checking whether execution can be
582-
/// skipped, to be reused by execution itself if _not_ skipped.
583-
dep_node: Option<DepNode>,
584-
}
585-
586564
/// Checks whether a `tcx.ensure_ok()` or `tcx.ensure_done()` query call can
587565
/// return early without actually trying to execute.
588566
///
589567
/// This only makes sense during incremental compilation, because it relies
590568
/// on having the dependency graph (and in some cases a disk-cached value)
591569
/// from the previous incr-comp session.
592570
#[inline(never)]
593-
fn check_if_ensure_can_skip_execution<'tcx, C: QueryCache>(
571+
fn can_ensure_skip_execution<'tcx, C: QueryCache>(
594572
query: &'tcx QueryVTable<'tcx, C>,
595573
tcx: TyCtxt<'tcx>,
596574
key: C::Key,
575+
dep_node: DepNode,
597576
ensure_mode: EnsureMode,
598-
) -> EnsureCanSkip {
577+
) -> bool {
599578
// Queries with `eval_always` should never skip execution.
600579
if query.eval_always {
601-
return EnsureCanSkip { skip_execution: false, dep_node: None };
580+
return false;
602581
}
603582

604583
// Ensuring an anonymous query makes no sense
605584
assert!(!query.anon);
606585

607-
let dep_node = DepNode::construct(tcx, query.dep_kind, &key);
608-
609-
let serialized_dep_node_index = match tcx.dep_graph.try_mark_green(tcx, &dep_node) {
586+
match tcx.dep_graph.try_mark_green(tcx, &dep_node) {
610587
None => {
611588
// A None return from `try_mark_green` means that this is either
612589
// a new dep node or that the dep node has already been marked red.
613590
// Either way, we can't call `dep_graph.read()` as we don't have the
614591
// DepNodeIndex. We must invoke the query itself. The performance cost
615592
// this introduces should be negligible as we'll immediately hit the
616593
// in-memory cache, or another query down the line will.
617-
return EnsureCanSkip { skip_execution: false, dep_node: Some(dep_node) };
594+
false
618595
}
619596
Some((serialized_dep_node_index, dep_node_index)) => {
620597
tcx.dep_graph.read_index(dep_node_index);
621598
tcx.prof.query_cache_hit(dep_node_index.into());
622-
serialized_dep_node_index
623-
}
624-
};
625-
626-
match ensure_mode {
627-
EnsureMode::Ok => {
628-
// In ensure-ok mode, we can skip execution for this key if the node
629-
// is green. It must have succeeded in the previous session, and
630-
// therefore would succeed in the current session if executed.
631-
EnsureCanSkip { skip_execution: true, dep_node: None }
632-
}
633-
EnsureMode::Done => {
634-
// In ensure-done mode, we can only skip execution for this key if
635-
// there's a disk-cached value available to load later if needed,
636-
// which guarantees the query provider will never run for this key.
637-
let is_loadable = (query.is_loadable_from_disk_fn)(tcx, key, serialized_dep_node_index);
638-
EnsureCanSkip { skip_execution: is_loadable, dep_node: Some(dep_node) }
599+
match ensure_mode {
600+
// In ensure-ok mode, we can skip execution for this key if the
601+
// node is green. It must have succeeded in the previous
602+
// session, and therefore would succeed in the current session
603+
// if executed.
604+
EnsureMode::Ok => true,
605+
606+
// In ensure-done mode, we can only skip execution for this key
607+
// if there's a disk-cached value available to load later if
608+
// needed, which guarantees the query provider will never run
609+
// for this key.
610+
EnsureMode::Done => {
611+
(query.is_loadable_from_disk_fn)(tcx, key, serialized_dep_node_index)
612+
}
613+
}
639614
}
640615
}
641616
}
@@ -649,9 +624,7 @@ pub(super) fn execute_query_non_incr_inner<'tcx, C: QueryCache>(
649624
span: Span,
650625
key: C::Key,
651626
) -> C::Value {
652-
debug_assert!(!tcx.dep_graph.is_fully_enabled());
653-
654-
ensure_sufficient_stack(|| try_execute_query::<C, false>(query, tcx, span, key, None).0)
627+
ensure_sufficient_stack(|| try_execute_query::<C>(query, tcx, span, key, None).0)
655628
}
656629

657630
/// Called by a macro-generated impl of [`QueryVTable::execute_query_fn`],
@@ -664,26 +637,19 @@ pub(super) fn execute_query_incr_inner<'tcx, C: QueryCache>(
664637
key: C::Key,
665638
mode: QueryMode,
666639
) -> Option<C::Value> {
667-
debug_assert!(tcx.dep_graph.is_fully_enabled());
640+
// There are scenarios where this `dep_node` is never used (e.g. for `anon` queries) but they
641+
// are rare enough that the wasted effort doesn't materially hurt performance.
642+
let dep_node = DepNode::construct(tcx, query.dep_kind, &key);
668643

669644
// Check if query execution can be skipped, for `ensure_ok` or `ensure_done`.
670-
// This might have the side-effect of creating a suitable DepNode, which
671-
// we should reuse for execution instead of creating a new one.
672-
let dep_node: Option<DepNode> = match mode {
673-
QueryMode::Ensure { ensure_mode } => {
674-
let EnsureCanSkip { skip_execution, dep_node } =
675-
check_if_ensure_can_skip_execution(query, tcx, key, ensure_mode);
676-
if skip_execution {
677-
// Return early to skip execution.
678-
return None;
679-
}
680-
dep_node
681-
}
682-
QueryMode::Get => None,
683-
};
645+
if let QueryMode::Ensure { ensure_mode } = mode
646+
&& can_ensure_skip_execution(query, tcx, key, dep_node, ensure_mode)
647+
{
648+
return None;
649+
}
684650

685651
let (result, dep_node_index) =
686-
ensure_sufficient_stack(|| try_execute_query::<C, true>(query, tcx, span, key, dep_node));
652+
ensure_sufficient_stack(|| try_execute_query::<C>(query, tcx, span, key, Some(dep_node)));
687653
if let Some(dep_node_index) = dep_node_index {
688654
tcx.dep_graph.read_index(dep_node_index)
689655
}
@@ -705,7 +671,5 @@ pub(crate) fn force_query<'tcx, C: QueryCache>(
705671

706672
debug_assert!(!query.anon);
707673

708-
ensure_sufficient_stack(|| {
709-
try_execute_query::<C, true>(query, tcx, DUMMY_SP, key, Some(dep_node))
710-
});
674+
ensure_sufficient_stack(|| try_execute_query::<C>(query, tcx, DUMMY_SP, key, Some(dep_node)));
711675
}

0 commit comments

Comments (0)