@@ -104,7 +104,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
             if let Some((_, value)) =
                 lock.results.raw_entry().from_key_hashed_nocheck(key_hash, key)
             {
-                tcx.prof.query_cache_hit(Q::NAME);
+                tcx.prof.query_cache_hit(value.index.into());
                 let result = (value.value.clone(), value.index);
                 #[cfg(debug_assertions)]
                 {
@@ -356,7 +356,7 @@ impl<'tcx> TyCtxt<'tcx> {
     #[inline(never)]
     pub(super) fn get_query<Q: QueryDescription<'tcx>>(self, span: Span, key: Q::Key) -> Q::Value {
         debug!("ty::query::get_query<{}>(key={:?}, span={:?})",
-               Q::NAME.as_str(),
+               Q::NAME,
               key,
               span);

@@ -378,7 +378,7 @@ impl<'tcx> TyCtxt<'tcx> {

         if Q::ANON {

-            let prof_timer = self.prof.query_provider(Q::NAME);
+            let prof_timer = self.prof.query_provider();

             let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
                 self.start_query(job.job.clone(), diagnostics, |tcx| {
@@ -388,7 +388,7 @@ impl<'tcx> TyCtxt<'tcx> {
                 })
             });

-            drop(prof_timer);
+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());

             self.dep_graph.read_index(dep_node_index);

@@ -445,8 +445,9 @@ impl<'tcx> TyCtxt<'tcx> {
         // First we try to load the result from the on-disk cache.
         let result = if Q::cache_on_disk(self, key.clone(), None) &&
                         self.sess.opts.debugging_opts.incremental_queries {
-            let _prof_timer = self.prof.incr_cache_loading(Q::NAME);
+            let prof_timer = self.prof.incr_cache_loading();
             let result = Q::try_load_from_disk(self, prev_dep_node_index);
+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());

             // We always expect to find a cached result for things that
             // can be forced from `DepNode`.
@@ -465,13 +466,15 @@ impl<'tcx> TyCtxt<'tcx> {
         } else {
             // We could not load a result from the on-disk cache, so
             // recompute.
-            let _prof_timer = self.prof.query_provider(Q::NAME);
+            let prof_timer = self.prof.query_provider();

             // The dep-graph for this computation is already in-place.
             let result = self.dep_graph.with_ignore(|| {
                 Q::compute(self, key)
             });

+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
             result
         };

@@ -534,7 +537,7 @@ impl<'tcx> TyCtxt<'tcx> {
                 - dep-node: {:?}",
                key, dep_node);

-        let prof_timer = self.prof.query_provider(Q::NAME);
+        let prof_timer = self.prof.query_provider();

         let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
             self.start_query(job.job.clone(), diagnostics, |tcx| {
@@ -554,7 +557,7 @@ impl<'tcx> TyCtxt<'tcx> {
             })
         });

-        drop(prof_timer);
+        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

         if unlikely!(!diagnostics.is_empty()) {
             if dep_node.kind != crate::dep_graph::DepKind::Null {
@@ -586,17 +589,19 @@ impl<'tcx> TyCtxt<'tcx> {

         let dep_node = Q::to_dep_node(self, &key);

-        if self.dep_graph.try_mark_green_and_read(self, &dep_node).is_none() {
-            // A None return from `try_mark_green_and_read` means that this is either
-            // a new dep node or that the dep node has already been marked red.
-            // Either way, we can't call `dep_graph.read()` as we don't have the
-            // DepNodeIndex. We must invoke the query itself. The performance cost
-            // this introduces should be negligible as we'll immediately hit the
-            // in-memory cache, or another query down the line will.
-
-            let _ = self.get_query::<Q>(DUMMY_SP, key);
-        } else {
-            self.prof.query_cache_hit(Q::NAME);
+        match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
+            None => {
+                // A None return from `try_mark_green_and_read` means that this is either
+                // a new dep node or that the dep node has already been marked red.
+                // Either way, we can't call `dep_graph.read()` as we don't have the
+                // DepNodeIndex. We must invoke the query itself. The performance cost
+                // this introduces should be negligible as we'll immediately hit the
+                // in-memory cache, or another query down the line will.
+                let _ = self.get_query::<Q>(DUMMY_SP, key);
+            }
+            Some((_, dep_node_index)) => {
+                self.prof.query_cache_hit(dep_node_index.into());
+            }
         }
     }

@@ -713,6 +718,42 @@ macro_rules! define_queries_inner {
             }
         }

+        /// All self-profiling events generated by the query engine use
+        /// virtual `StringId`s for their `event_id`. This method makes all
+        /// those virtual `StringId`s point to actual strings.
+        ///
+        /// If we are recording only summary data, the ids will point to
+        /// just the query names. If we are recording query keys too, we
+        /// allocate the corresponding strings here. (The latter is not yet
+        /// implemented.)
+        pub fn allocate_self_profile_query_strings(
+            &self,
+            profiler: &rustc_data_structures::profiling::SelfProfiler
+        ) {
+            // Walk the entire query cache and allocate the appropriate
+            // string representation. Each cache entry is uniquely
+            // identified by its dep_node_index.
+            $({
+                let query_name_string_id =
+                    profiler.get_or_alloc_cached_string(stringify!($name));
+
+                let result_cache = self.$name.lock_shards();
+
+                for shard in result_cache.iter() {
+                    let query_invocation_ids = shard
+                        .results
+                        .values()
+                        .map(|v| v.index)
+                        .map(|dep_node_index| dep_node_index.into());
+
+                    profiler.bulk_map_query_invocation_id_to_single_string(
+                        query_invocation_ids,
+                        query_name_string_id
+                    );
+                }
+            })*
+        }
+
         #[cfg(parallel_compiler)]
         pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
             let mut jobs = Vec::new();
@@ -830,36 +871,6 @@ macro_rules! define_queries_inner {
             }
         }

-        #[allow(nonstandard_style)]
-        #[derive(Clone, Copy)]
-        pub enum QueryName {
-            $($name),*
-        }
-
-        impl rustc_data_structures::profiling::QueryName for QueryName {
-            fn discriminant(self) -> std::mem::Discriminant<QueryName> {
-                std::mem::discriminant(&self)
-            }
-
-            fn as_str(self) -> &'static str {
-                QueryName::as_str(&self)
-            }
-        }
-
-        impl QueryName {
-            pub fn register_with_profiler(
-                profiler: &rustc_data_structures::profiling::SelfProfiler,
-            ) {
-                $(profiler.register_query_name(QueryName::$name);)*
-            }
-
-            pub fn as_str(&self) -> &'static str {
-                match self {
-                    $(QueryName::$name => stringify!($name),)*
-                }
-            }
-        }
-
         #[allow(nonstandard_style)]
         #[derive(Clone, Debug)]
         pub enum Query<$tcx> {
@@ -900,12 +911,6 @@ macro_rules! define_queries_inner {
                 $(Query::$name(key) => key.default_span(tcx),)*
             }
         }
-
-        pub fn query_name(&self) -> QueryName {
-            match self {
-                $(Query::$name(_) => QueryName::$name,)*
-            }
-        }
     }

     impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
@@ -940,7 +945,7 @@ macro_rules! define_queries_inner {
             type Key = $K;
             type Value = $V;

-            const NAME: QueryName = QueryName::$name;
+            const NAME: &'static str = stringify!($name);
             const CATEGORY: ProfileCategory = $category;
         }

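To make the intent of this diff concrete, here is a minimal, self-contained sketch of the scheme it moves toward: profiling events are tagged with a numeric QueryInvocationId derived from the DepNodeIndex instead of carrying a query-name string, and the id-to-name strings are resolved once, in bulk, at the end of the session (as allocate_self_profile_query_strings does above). This is not rustc's actual code; ToyProfiler, bulk_map_ids_to_name, and the query names used in main are simplified, hypothetical stand-ins.

    use std::collections::HashMap;

    // Simplified stand-ins for rustc's `DepNodeIndex` and measureme's
    // `QueryInvocationId`; the real types live in rustc and measureme.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    struct DepNodeIndex(u32);

    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    struct QueryInvocationId(u32);

    impl From<DepNodeIndex> for QueryInvocationId {
        fn from(idx: DepNodeIndex) -> Self {
            QueryInvocationId(idx.0)
        }
    }

    // A toy profiler: events carry only a numeric invocation id while the
    // compiler runs; the id -> query-name mapping is filled in afterwards.
    #[derive(Default)]
    struct ToyProfiler {
        events: Vec<(QueryInvocationId, &'static str)>,
        id_to_name: HashMap<QueryInvocationId, String>,
    }

    impl ToyProfiler {
        fn query_cache_hit(&mut self, id: QueryInvocationId) {
            self.events.push((id, "cache-hit"));
        }

        fn query_provider(&mut self, id: QueryInvocationId) {
            self.events.push((id, "provider"));
        }

        // Rough analogue of `bulk_map_query_invocation_id_to_single_string`:
        // every invocation id belonging to one query maps to the same name.
        fn bulk_map_ids_to_name<I>(&mut self, ids: I, name: &str)
        where
            I: IntoIterator<Item = QueryInvocationId>,
        {
            for id in ids {
                self.id_to_name.insert(id, name.to_string());
            }
        }
    }

    fn main() {
        let mut prof = ToyProfiler::default();

        // While "compiling", only dep-node indices are known; no string
        // lookups happen on the hot path.
        prof.query_provider(DepNodeIndex(0).into());
        prof.query_cache_hit(DepNodeIndex(0).into());
        prof.query_provider(DepNodeIndex(1).into());

        // At the end of the session, walk the query caches and resolve the
        // ids to query names in bulk (hypothetical names for illustration).
        prof.bulk_map_ids_to_name([DepNodeIndex(0).into()], "type_of");
        prof.bulk_map_ids_to_name([DepNodeIndex(1).into()], "typeck_tables_of");

        for (id, kind) in &prof.events {
            println!("{}: {}", kind, prof.id_to_name[id]);
        }
    }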