Handle proc macro fetching via OpQueue

Lukas Wirth 2023-03-26 08:39:28 +02:00
parent 27c076a367
commit ee02213e65
17 changed files with 121 additions and 92 deletions


@@ -101,7 +101,10 @@ pub fn load_workspace(
.map(|(crate_id, path)| {
(
crate_id,
path.and_then(|(_, path)| load_proc_macro(proc_macro_server, &path, &[])),
path.map_or_else(
|| Err("proc macro crate is missing dylib".to_owned()),
|(_, path)| load_proc_macro(proc_macro_server, &path, &[]),
),
)
})
.collect()
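
The change above replaces and_then with map_or_else, so a proc-macro crate with no recorded dylib path now produces an explicit error instead of silently yielding nothing. A minimal standalone sketch of that pattern, with a hypothetical load stub standing in for load_proc_macro:

use std::path::{Path, PathBuf};

// Hypothetical stand-in for `load_proc_macro`; only the Option handling matters here.
fn load(_path: &Path) -> Result<(), String> {
    Ok(())
}

fn resolve(path: Option<(String, PathBuf)>) -> Result<(), String> {
    path.map_or_else(
        // No dylib recorded for this proc-macro crate: report an error instead of skipping it.
        || Err("proc macro crate is missing dylib".to_owned()),
        // A dylib path is known: try to load it.
        |(_, path)| load(&path),
    )
}

fn main() {
    assert!(resolve(None).is_err());
    assert!(resolve(Some(("serde_derive".to_owned(), PathBuf::from("libserde_derive.so")))).is_ok());
}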


@@ -1106,6 +1106,10 @@ impl Config {
&self.data.procMacro_ignored
}
pub fn expand_proc_macros(&self) -> bool {
self.data.procMacro_enable
}
pub fn expand_proc_attr_macros(&self) -> bool {
self.data.procMacro_enable && self.data.procMacro_attributes_enable
}


@@ -8,7 +8,7 @@ use std::{sync::Arc, time::Instant};
use crossbeam_channel::{unbounded, Receiver, Sender};
use flycheck::FlycheckHandle;
use ide::{Analysis, AnalysisHost, Cancellable, Change, FileId};
use ide_db::base_db::{CrateId, FileLoader, SourceDatabase};
use ide_db::base_db::{CrateId, FileLoader, ProcMacroPaths, SourceDatabase};
use lsp_types::{SemanticTokens, Url};
use parking_lot::{Mutex, RwLock};
use proc_macro_api::ProcMacroServer;
@@ -101,11 +101,12 @@ pub(crate) struct GlobalState {
/// the user just adds comments or whitespace to Cargo.toml, we do not want
/// to invalidate any salsa caches.
pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>,
pub(crate) fetch_workspaces_queue: OpQueue<Option<Vec<anyhow::Result<ProjectWorkspace>>>>,
pub(crate) fetch_workspaces_queue: OpQueue<(), Option<Vec<anyhow::Result<ProjectWorkspace>>>>,
pub(crate) fetch_build_data_queue:
OpQueue<(Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>)>,
OpQueue<(), (Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>)>,
pub(crate) fetch_proc_macros_queue: OpQueue<Vec<ProcMacroPaths>, bool>,
pub(crate) prime_caches_queue: OpQueue<()>,
pub(crate) prime_caches_queue: OpQueue,
}
/// An immutable snapshot of the world's state at a point in time.
@@ -117,6 +118,7 @@ pub(crate) struct GlobalStateSnapshot {
pub(crate) semantic_tokens_cache: Arc<Mutex<FxHashMap<Url, SemanticTokens>>>,
vfs: Arc<RwLock<(vfs::Vfs, NoHashHashMap<FileId, LineEndings>)>>,
pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>,
// used to signal that semantic highlighting should fall back to syntax-based highlighting until proc-macros have been loaded
pub(crate) proc_macros_loaded: bool,
pub(crate) flycheck: Arc<[FlycheckHandle]>,
}
@@ -170,9 +172,10 @@ impl GlobalState {
workspaces: Arc::new(Vec::new()),
fetch_workspaces_queue: OpQueue::default(),
prime_caches_queue: OpQueue::default(),
fetch_build_data_queue: OpQueue::default(),
fetch_proc_macros_queue: OpQueue::default(),
prime_caches_queue: OpQueue::default(),
};
// Apply any required database inputs from the config.
this.update_configuration(config);
@@ -286,7 +289,7 @@ impl GlobalState {
// crate see https://github.com/rust-lang/rust-analyzer/issues/13029
if let Some(path) = workspace_structure_change {
self.fetch_workspaces_queue
.request_op(format!("workspace vfs file change: {}", path.display()));
.request_op(format!("workspace vfs file change: {}", path.display()), ());
}
self.proc_macro_changed =
changed_files.iter().filter(|file| !file.is_created_or_deleted()).any(|file| {
@@ -309,7 +312,8 @@ impl GlobalState {
check_fixes: Arc::clone(&self.diagnostics.check_fixes),
mem_docs: self.mem_docs.clone(),
semantic_tokens_cache: Arc::clone(&self.semantic_tokens_cache),
proc_macros_loaded: !self.fetch_build_data_queue.last_op_result().0.is_empty(),
proc_macros_loaded: !self.config.expand_proc_macros()
|| *self.fetch_proc_macros_queue.last_op_result(),
flycheck: self.flycheck.clone(),
}
}
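
With proc-macro loading split into its own op, the snapshot no longer derives proc_macros_loaded from the build-data result: it is true either when proc-macro expansion is disabled or once the proc-macro fetch op has completed (the bool output of OpQueue<Vec<ProcMacroPaths>, bool>). A small sketch of that predicate:

// Sketch only: mirrors `!expand_proc_macros() || *fetch_proc_macros_queue.last_op_result()`.
fn proc_macros_loaded(expand_proc_macros: bool, fetch_op_completed: bool) -> bool {
    !expand_proc_macros || fetch_op_completed
}

fn main() {
    // Expansion disabled: never hold semantic highlighting back.
    assert!(proc_macros_loaded(false, false));
    // Enabled but not fetched yet: fall back to syntax-based highlighting.
    assert!(!proc_macros_loaded(true, false));
    // Enabled and fetch completed: full semantic highlighting.
    assert!(proc_macros_loaded(true, true));
}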


@@ -48,16 +48,15 @@ pub(crate) fn handle_workspace_reload(state: &mut GlobalState, _: ()) -> Result<
state.proc_macro_clients = Arc::new([]);
state.proc_macro_changed = false;
state.fetch_workspaces_queue.request_op("reload workspace request".to_string());
state.fetch_build_data_queue.request_op("reload workspace request".to_string());
state.fetch_workspaces_queue.request_op("reload workspace request".to_string(), ());
Ok(())
}
pub(crate) fn handle_proc_macros_reload(state: &mut GlobalState, _: ()) -> Result<()> {
pub(crate) fn handle_proc_macros_rebuild(state: &mut GlobalState, _: ()) -> Result<()> {
state.proc_macro_clients = Arc::new([]);
state.proc_macro_changed = false;
state.fetch_build_data_queue.request_op("reload proc macros request".to_string());
state.fetch_build_data_queue.request_op("rebuild proc macros request".to_string(), ());
Ok(())
}


@@ -51,12 +51,12 @@ impl Request for ReloadWorkspace {
const METHOD: &'static str = "rust-analyzer/reloadWorkspace";
}
pub enum ReloadProcMacros {}
pub enum RebuildProcMacros {}
impl Request for ReloadProcMacros {
impl Request for RebuildProcMacros {
type Params = ();
type Result = ();
const METHOD: &'static str = "rust-analyzer/reloadProcMacros";
const METHOD: &'static str = "rust-analyzer/rebuildProcMacros";
}
pub enum SyntaxTree {}
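
The endpoint is renamed from rust-analyzer/reloadProcMacros to rust-analyzer/rebuildProcMacros; params and result stay (). Assuming the usual JSON-RPC framing (and that () serializes to null), a client request to the renamed method would look roughly like this (the id is arbitrary):

// Hedged sketch: for clients, only the method string changes.
fn rebuild_proc_macros_request(id: u64) -> String {
    format!(r#"{{"jsonrpc":"2.0","id":{id},"method":"rust-analyzer/rebuildProcMacros","params":null}}"#)
}

fn main() {
    println!("{}", rebuild_proc_macros_request(1));
}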


@@ -149,8 +149,8 @@ impl GlobalState {
);
}
self.fetch_workspaces_queue.request_op("startup".to_string());
if let Some(cause) = self.fetch_workspaces_queue.should_start_op() {
self.fetch_workspaces_queue.request_op("startup".to_string(), ());
if let Some((cause, ())) = self.fetch_workspaces_queue.should_start_op() {
self.fetch_workspaces(cause);
}
@@ -248,7 +248,7 @@ impl GlobalState {
self.prime_caches_queue.op_completed(());
if cancelled {
self.prime_caches_queue
.request_op("restart after cancellation".to_string());
.request_op("restart after cancellation".to_string(), ());
}
}
};
@@ -280,7 +280,8 @@ impl GlobalState {
if self.is_quiescent() {
let became_quiescent = !(was_quiescent
|| self.fetch_workspaces_queue.op_requested()
|| self.fetch_build_data_queue.op_requested());
|| self.fetch_build_data_queue.op_requested()
|| self.fetch_proc_macros_queue.op_requested());
if became_quiescent {
if self.config.check_on_save() {
@@ -288,7 +289,7 @@ impl GlobalState {
self.flycheck.iter().for_each(FlycheckHandle::restart);
}
if self.config.prefill_caches() {
self.prime_caches_queue.request_op("became quiescent".to_string());
self.prime_caches_queue.request_op("became quiescent".to_string(), ());
}
}
@@ -358,18 +359,20 @@ impl GlobalState {
}
if self.config.cargo_autoreload() {
if let Some(cause) = self.fetch_workspaces_queue.should_start_op() {
if let Some((cause, ())) = self.fetch_workspaces_queue.should_start_op() {
self.fetch_workspaces(cause);
}
}
if !self.fetch_workspaces_queue.op_in_progress() {
if let Some(cause) = self.fetch_build_data_queue.should_start_op() {
if let Some((cause, ())) = self.fetch_build_data_queue.should_start_op() {
self.fetch_build_data(cause);
} else if let Some((cause, paths)) = self.fetch_proc_macros_queue.should_start_op() {
self.fetch_proc_macros(cause, paths);
}
}
if let Some(cause) = self.prime_caches_queue.should_start_op() {
if let Some((cause, ())) = self.prime_caches_queue.should_start_op() {
tracing::debug!(%cause, "will prime caches");
let num_worker_threads = self.config.prime_caches_num_threads();
@@ -463,7 +466,8 @@ impl GlobalState {
let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
if self.config.run_build_scripts() && workspaces_updated {
self.fetch_build_data_queue.request_op(format!("workspace updated"));
self.fetch_build_data_queue
.request_op(format!("workspace updated"), ());
}
(Progress::End, None)
@@ -497,6 +501,7 @@ impl GlobalState {
ProcMacroProgress::Begin => (Some(Progress::Begin), None),
ProcMacroProgress::Report(msg) => (Some(Progress::Report), Some(msg)),
ProcMacroProgress::End(proc_macro_load_result) => {
self.fetch_proc_macros_queue.op_completed(true);
self.set_proc_macros(proc_macro_load_result);
(Some(Progress::End), None)
@@ -649,7 +654,7 @@ impl GlobalState {
dispatcher
.on_sync_mut::<lsp_ext::ReloadWorkspace>(handlers::handle_workspace_reload)
.on_sync_mut::<lsp_ext::ReloadProcMacros>(handlers::handle_proc_macros_reload)
.on_sync_mut::<lsp_ext::RebuildProcMacros>(handlers::handle_proc_macros_rebuild)
.on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)
.on_sync_mut::<lsp_ext::ShuffleCrateGraph>(handlers::handle_shuffle_crate_graph)
.on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)
@@ -904,7 +909,7 @@ impl GlobalState {
if let Some(abs_path) = vfs_path.as_path() {
if reload::should_refresh_for_change(abs_path, ChangeKind::Modify) {
this.fetch_workspaces_queue
.request_op(format!("DidSaveTextDocument {}", abs_path.display()));
.request_op(format!("DidSaveTextDocument {}", abs_path.display()), ());
}
}
@@ -980,7 +985,7 @@ impl GlobalState {
config.workspace_roots.extend(added);
if !config.has_linked_projects() && config.detached_files().is_empty() {
config.rediscover_workspaces();
this.fetch_workspaces_queue.request_op("client workspaces changed".to_string())
this.fetch_workspaces_queue.request_op("client workspaces changed".to_string(), ())
}
Ok(())
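
Taken together, the main-loop changes thread the queued arguments through the request_op -> should_start_op -> op_completed lifecycle, and the new proc-macro fetch runs only when no workspace fetch is in flight and no build-data fetch was started in the same turn. A simplified model of that ordering (plain values instead of the real GlobalState and queues):

// Simplified model of the dispatch order in the main loop.
// build_data_request and proc_macro_request stand in for the respective
// queues' should_start_op() results.
fn pick_next_op(
    workspace_fetch_in_progress: bool,
    build_data_request: Option<String>,                // cause
    proc_macro_request: Option<(String, Vec<String>)>, // (cause, dylib paths)
) -> Option<String> {
    if workspace_fetch_in_progress {
        // Wait for the workspace fetch; its result determines what to load next.
        return None;
    }
    if let Some(cause) = build_data_request {
        Some(format!("fetch build data ({cause})"))
    } else if let Some((cause, paths)) = proc_macro_request {
        Some(format!("load {} proc-macro crates ({cause})", paths.len()))
    } else {
        None
    }
}

fn main() {
    assert_eq!(pick_next_op(true, Some("startup".into()), None), None);
    let next = pick_next_op(false, None, Some(("workspace updated".into(), vec!["libfoo.so".into()])));
    assert_eq!(next.as_deref(), Some("load 1 proc-macro crates (workspace updated)"));
}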


@@ -3,23 +3,23 @@
pub(crate) type Cause = String;
pub(crate) struct OpQueue<Output> {
op_requested: Option<Cause>,
pub(crate) struct OpQueue<Args = (), Output = ()> {
op_requested: Option<(Cause, Args)>,
op_in_progress: bool,
last_op_result: Output,
}
impl<Output: Default> Default for OpQueue<Output> {
impl<Args, Output: Default> Default for OpQueue<Args, Output> {
fn default() -> Self {
Self { op_requested: None, op_in_progress: false, last_op_result: Default::default() }
}
}
impl<Output> OpQueue<Output> {
pub(crate) fn request_op(&mut self, reason: Cause) {
self.op_requested = Some(reason);
impl<Args, Output> OpQueue<Args, Output> {
pub(crate) fn request_op(&mut self, reason: Cause, args: Args) {
self.op_requested = Some((reason, args));
}
pub(crate) fn should_start_op(&mut self) -> Option<Cause> {
pub(crate) fn should_start_op(&mut self) -> Option<(Cause, Args)> {
if self.op_in_progress {
return None;
}
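
The new Args type parameter lets callers queue an operation together with the data it needs when it eventually starts; the hunk above shows the requesting half. A self-contained sketch of the whole type, where everything past should_start_op's early return is an assumption reconstructed from this commit's call sites:

type Cause = String;

struct OpQueue<Args = (), Output = ()> {
    op_requested: Option<(Cause, Args)>,
    op_in_progress: bool,
    last_op_result: Output,
}

impl<Args, Output: Default> Default for OpQueue<Args, Output> {
    fn default() -> Self {
        Self { op_requested: None, op_in_progress: false, last_op_result: Default::default() }
    }
}

impl<Args, Output> OpQueue<Args, Output> {
    // Queue an operation together with the data it will need when it starts.
    fn request_op(&mut self, reason: Cause, args: Args) {
        self.op_requested = Some((reason, args));
    }
    // Hand out the pending cause and args unless an operation is already running.
    fn should_start_op(&mut self) -> Option<(Cause, Args)> {
        if self.op_in_progress {
            return None;
        }
        self.op_in_progress = self.op_requested.is_some();
        self.op_requested.take()
    }
    // Record completion and remember the result for later queries.
    fn op_completed(&mut self, result: Output) {
        self.op_in_progress = false;
        self.last_op_result = result;
    }
    fn last_op_result(&self) -> &Output {
        &self.last_op_result
    }
    fn op_in_progress(&self) -> bool {
        self.op_in_progress
    }
    fn op_requested(&self) -> bool {
        self.op_in_progress || self.op_requested.is_some()
    }
}

fn main() {
    // Mirrors fetch_proc_macros_queue: OpQueue<Vec<ProcMacroPaths>, bool>,
    // with the paths simplified to plain strings.
    let mut queue: OpQueue<Vec<String>, bool> = OpQueue::default();
    queue.request_op("workspace updated".to_string(), vec!["libfoo_derive.so".to_string()]);
    if let Some((cause, paths)) = queue.should_start_op() {
        println!("fetching {} proc-macro dylibs ({cause})", paths.len());
        queue.op_completed(true);
    }
    assert!(*queue.last_op_result());
    assert!(!queue.op_in_progress());
    assert!(!queue.op_requested());
}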


@@ -66,6 +66,7 @@ impl GlobalState {
!(self.last_reported_status.is_none()
|| self.fetch_workspaces_queue.op_in_progress()
|| self.fetch_build_data_queue.op_in_progress()
|| self.fetch_proc_macros_queue.op_in_progress()
|| self.vfs_progress_config_version < self.vfs_config_version
|| self.vfs_progress_n_done < self.vfs_progress_n_total)
}
@@ -77,7 +78,7 @@ impl GlobalState {
self.analysis_host.update_lru_capacity(self.config.lru_capacity());
}
if self.config.linked_projects() != old_config.linked_projects() {
self.fetch_workspaces_queue.request_op("linked projects changed".to_string())
self.fetch_workspaces_queue.request_op("linked projects changed".to_string(), ())
} else if self.config.flycheck() != old_config.flycheck() {
self.reload_flycheck();
}
@@ -101,7 +102,7 @@ impl GlobalState {
if self.proc_macro_changed {
status.health = lsp_ext::Health::Warning;
message.push_str("Reload required due to source changes of a procedural macro.\n\n");
message.push_str("Proc-macros have changed and need to be rebuild.\n\n");
}
if let Err(_) = self.fetch_build_data_error() {
status.health = lsp_ext::Health::Warning;
@@ -223,8 +224,8 @@ impl GlobalState {
});
}
pub(crate) fn load_proc_macros(&mut self, paths: Vec<ProcMacroPaths>) {
tracing::info!("will load proc macros");
pub(crate) fn fetch_proc_macros(&mut self, cause: Cause, paths: Vec<ProcMacroPaths>) {
tracing::info!(%cause, "will load proc macros");
let dummy_replacements = self.config.dummy_replacements().clone();
let proc_macro_clients = self.proc_macro_clients.clone();
@@ -240,28 +241,30 @@ impl GlobalState {
};
let mut res = FxHashMap::default();
for (client, paths) in proc_macro_clients
let chain = proc_macro_clients
.iter()
.map(|res| res.as_ref().map_err(|e| &**e))
.chain(iter::repeat_with(|| Err("Proc macros are disabled")))
.zip(paths)
{
.chain(iter::repeat_with(|| Err("Proc macro servers are not running")));
for (client, paths) in chain.zip(paths) {
res.extend(paths.into_iter().map(move |(crate_id, res)| {
(
crate_id,
res.and_then(|(crate_name, path)| {
progress(path.display().to_string());
load_proc_macro(
client,
&path,
crate_name
.as_deref()
.and_then(|crate_name| {
dummy_replacements.get(crate_name).map(|v| &**v)
})
.unwrap_or_default(),
)
}),
res.map_or_else(
|| Err("proc macro crate is missing dylib".to_owned()),
|(crate_name, path)| {
progress(path.display().to_string());
load_proc_macro(
client,
&path,
crate_name
.as_deref()
.and_then(|crate_name| {
dummy_replacements.get(crate_name).map(|v| &**v)
})
.unwrap_or_default(),
)
},
),
)
}));
}
@@ -443,14 +446,25 @@ impl GlobalState {
(crate_graph, proc_macros)
};
let mut change = Change::new();
if same_workspaces {
if self.config.expand_proc_macros() {
self.fetch_proc_macros_queue.request_op(cause, proc_macro_paths);
}
} else {
// Set up errors upfront for the proc-macro crates we haven't run build scripts for yet
let mut proc_macros = FxHashMap::default();
for paths in proc_macro_paths {
proc_macros.extend(paths.into_iter().map(move |(crate_id, _)| {
(crate_id, Err("crate has not yet been build".to_owned()))
}));
}
change.set_proc_macros(proc_macros);
}
change.set_crate_graph(crate_graph);
self.analysis_host.apply_change(change);
self.process_changes();
if same_workspaces && !self.fetch_workspaces_queue.op_requested() {
self.load_proc_macros(proc_macro_paths);
}
self.reload_flycheck();
tracing::info!("did switch workspaces");
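
switch_workspaces therefore no longer loads proc macros inline: when the workspace set is unchanged and expansion is enabled, the collected paths are queued on fetch_proc_macros_queue; when it changed, every proc-macro crate gets a placeholder error until build scripts and the proc-macro fetch have run. A small sketch of the placeholder half, with CrateId simplified to an integer:

use std::collections::HashMap;

// Sketch of the placeholder errors installed when the workspace set changed:
// expansion for these crates fails with a clear message until their dylibs exist.
fn placeholder_proc_macros(crate_ids: &[u32]) -> HashMap<u32, Result<(), String>> {
    crate_ids
        .iter()
        .map(|&crate_id| (crate_id, Err("crate has not yet been built".to_owned())))
        .collect()
}

fn main() {
    let proc_macros = placeholder_proc_macros(&[0, 1, 2]);
    assert!(proc_macros.values().all(|res| res.is_err()));
}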