Skip to content

Commit

Permalink
Auto merge of #17439 - Veykril:paralleler-prime-caches, r=Veykril
Browse files Browse the repository at this point in the history
Properly prime all crate def maps in parallel_prime_caches
  • Loading branch information
bors committed Jun 17, 2024
2 parents 1bb376c + 158626b commit 7b38f8a
Show file tree
Hide file tree
Showing 2 changed files with 138 additions and 35 deletions.
123 changes: 122 additions & 1 deletion crates/hir-expand/src/fixup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,6 @@ pub(crate) fn fixup_syntax(
preorder.skip_subtree();
continue;
}

// In some other situations, we can fix things by just appending some tokens.
match_ast! {
match node {
Expand Down Expand Up @@ -276,6 +275,62 @@ pub(crate) fn fixup_syntax(
]);
}
},
ast::RecordExprField(it) => {
if let Some(colon) = it.colon_token() {
if it.name_ref().is_some() {
append.insert(colon.into(), vec![
Leaf::Ident(Ident {
text: "__ra_fixup".into(),
span: fake_span(node_range)
})
]);
}
}
},
ast::Path(it) => {
if let Some(colon) = it.coloncolon_token() {
if it.segment().is_none() {
append.insert(colon.into(), vec![
Leaf::Ident(Ident {
text: "__ra_fixup".into(),
span: fake_span(node_range)
})
]);
}
}
},
ast::ArgList(it) => {
if it.r_paren_token().is_none() {
append.insert(node.into(), vec![
Leaf::Punct(Punct {
span: fake_span(node_range),
char: ')',
spacing: Spacing::Alone
})
]);
}
},
ast::ArgList(it) => {
if it.r_paren_token().is_none() {
append.insert(node.into(), vec![
Leaf::Punct(Punct {
span: fake_span(node_range),
char: ')',
spacing: Spacing::Alone
})
]);
}
},
ast::ClosureExpr(it) => {
if it.body().is_none() {
append.insert(node.into(), vec![
Leaf::Ident(Ident {
text: "__ra_fixup".into(),
span: fake_span(node_range)
})
]);
}
},
_ => (),
}
}
Expand Down Expand Up @@ -759,4 +814,70 @@ fn foo () {loop { }}
"#]],
)
}

#[test]
fn fixup_path() {
    // A path that ends in `::` with no trailing segment must be repaired by
    // appending the `__ra_fixup` identifier after the `::`, keeping the
    // token tree well-formed for macro expansion.
    let before = r#"
fn foo() {
path::
}
"#;
    let after = expect![[r#"
fn foo () {path :: __ra_fixup}
"#]];
    check(before, after)
}

#[test]
fn fixup_record_ctor_field() {
    // A record-literal field written as `f:` with no value expression is
    // fixed up by inserting `__ra_fixup` after the colon.
    let before = r#"
fn foo() {
R { f: }
}
"#;
    let after = expect![[r#"
fn foo () {R {f : __ra_fixup}}
"#]];
    check(before, after)
}

#[test]
fn fixup_arg_list() {
    // An argument list missing its closing `)` gets one appended — checked
    // for both a free-function call and a method call.
    let free_call = r#"
fn foo() {
foo(a
}
"#;
    check(
        free_call,
        expect![[r#"
fn foo () { foo ( a ) }
"#]],
    );

    let method_call = r#"
fn foo() {
bar.foo(a
}
"#;
    check(
        method_call,
        expect![[r#"
fn foo () { bar . foo ( a ) }
"#]],
    );
}

#[test]
fn fixup_closure() {
    // A closure `||` with no body expression has `__ra_fixup` appended to
    // stand in as its body.
    let before = r#"
fn foo() {
||
}
"#;
    let after = expect![[r#"
fn foo () {|| __ra_fixup}
"#]];
    check(before, after);
}
}
50 changes: 16 additions & 34 deletions crates/ide-db/src/prime_caches.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,9 @@ use hir::db::DefDatabase;
use crate::{
base_db::{
salsa::{Database, ParallelDatabase, Snapshot},
Cancelled, CrateGraph, CrateId, SourceDatabase, SourceDatabaseExt,
Cancelled, CrateId, SourceDatabase, SourceDatabaseExt,
},
FxHashSet, FxIndexMap, RootDatabase,
FxIndexMap, RootDatabase,
};

/// We're indexing many crates.
Expand All @@ -36,19 +36,10 @@ pub fn parallel_prime_caches(

let graph = db.crate_graph();
let mut crates_to_prime = {
let crate_ids = compute_crates_to_prime(db, &graph);

let mut builder = topologic_sort::TopologicalSortIter::builder();

for &crate_id in &crate_ids {
let crate_data = &graph[crate_id];
let dependencies = crate_data
.dependencies
.iter()
.map(|d| d.crate_id)
.filter(|i| crate_ids.contains(i));

builder.add(crate_id, dependencies);
for crate_id in graph.iter() {
builder.add(crate_id, graph[crate_id].dependencies.iter().map(|d| d.crate_id));
}

builder.build()
Expand All @@ -62,27 +53,34 @@ pub fn parallel_prime_caches(
let (work_sender, progress_receiver) = {
let (progress_sender, progress_receiver) = crossbeam_channel::unbounded();
let (work_sender, work_receiver) = crossbeam_channel::unbounded();
let graph = graph.clone();
let prime_caches_worker = move |db: Snapshot<RootDatabase>| {
while let Ok((crate_id, crate_name)) = work_receiver.recv() {
progress_sender
.send(ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name })?;

// This also computes the DefMap
db.import_map(crate_id);
let file_id = graph[crate_id].root_file_id;
let root_id = db.file_source_root(file_id);
if db.source_root(root_id).is_library {
db.crate_def_map(crate_id);
} else {
// This also computes the DefMap
db.import_map(crate_id);
}

progress_sender.send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?;
}

Ok::<_, crossbeam_channel::SendError<_>>(())
};

for _ in 0..num_worker_threads {
for id in 0..num_worker_threads {
let worker = prime_caches_worker.clone();
let db = db.snapshot();

stdx::thread::Builder::new(stdx::thread::ThreadIntent::Worker)
.allow_leak(true)
.name("PrimeCaches".to_owned())
.name(format!("PrimeCaches#{id}"))
.spawn(move || Cancelled::catch(|| worker(db)))
.expect("failed to spawn thread");
}
Expand All @@ -96,7 +94,7 @@ pub fn parallel_prime_caches(
// an index map is used to preserve ordering so we can sort the progress report in order of
// "longest crate to index" first
let mut crates_currently_indexing =
FxIndexMap::with_capacity_and_hasher(num_worker_threads as _, Default::default());
FxIndexMap::with_capacity_and_hasher(num_worker_threads, Default::default());

while crates_done < crates_total {
db.unwind_if_cancelled();
Expand Down Expand Up @@ -144,19 +142,3 @@ pub fn parallel_prime_caches(
cb(progress);
}
}

fn compute_crates_to_prime(db: &RootDatabase, graph: &CrateGraph) -> FxHashSet<CrateId> {
// We're only interested in the workspace crates and the `ImportMap`s of their direct
// dependencies, though in practice the latter also compute the `DefMap`s.
// We don't prime transitive dependencies because they're generally not visible in
// the current workspace.
graph
.iter()
.filter(|&id| {
let file_id = graph[id].root_file_id;
let root_id = db.file_source_root(file_id);
!db.source_root(root_id).is_library
})
.flat_map(|id| graph[id].dependencies.iter().map(|krate| krate.crate_id))
.collect()
}

0 comments on commit 7b38f8a

Please sign in to comment.