internal: Give rustfmt jobs a separate thread

Lukas Wirth 2023-06-11 19:56:24 +02:00
parent b7497fcdfa
commit 52bb94d697
4 changed files with 54 additions and 29 deletions


@@ -135,7 +135,7 @@ impl<'a> RequestDispatcher<'a> {
         R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
         R::Result: Serialize,
     {
-        self.on_with_thread_intent::<R>(ThreadIntent::Worker, f)
+        self.on_with_thread_intent::<true, R>(ThreadIntent::Worker, f)
     }

     /// Dispatches a latency-sensitive request onto the thread pool.
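The new `<true, R>` turbofish works because `on_with_thread_intent` now takes a const boolean type parameter (see the later hunk in this file). As a small, self-contained sketch of that language feature, with illustrative names that are not part of rust-analyzer:

    // A bool const generic is fixed at compile time, so each call site picks a
    // code path without threading a runtime flag through every signature.
    fn pool_name<const MAIN_POOL: bool>() -> &'static str {
        if MAIN_POOL {
            "main task pool"
        } else {
            "dedicated formatting thread"
        }
    }

    fn main() {
        assert_eq!(pool_name::<true>(), "main task pool");
        assert_eq!(pool_name::<false>(), "dedicated formatting thread");
    }
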
@@ -148,7 +148,22 @@ impl<'a> RequestDispatcher<'a> {
         R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
         R::Result: Serialize,
     {
-        self.on_with_thread_intent::<R>(ThreadIntent::LatencySensitive, f)
+        self.on_with_thread_intent::<true, R>(ThreadIntent::LatencySensitive, f)
+    }
+
+    /// Formatting requests should never block on waiting a for task thread to open up, editors will wait
+    /// on the response and a late formatting update might mess with the document and user.
+    /// We can't run this on the main thread though as we invoke rustfmt which may take arbitrary time to complete!
+    pub(crate) fn on_fmt_thread<R>(
+        &mut self,
+        f: fn(GlobalStateSnapshot, R::Params) -> Result<R::Result>,
+    ) -> &mut Self
+    where
+        R: lsp_types::request::Request + 'static,
+        R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
+        R::Result: Serialize,
+    {
+        self.on_with_thread_intent::<false, R>(ThreadIntent::LatencySensitive, f)
     }

     pub(crate) fn finish(&mut self) {
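To see why a formatting request can take arbitrarily long, here is a minimal sketch of shelling out to rustfmt over stdin/stdout, assuming `rustfmt` is on PATH. This is illustrative only, not rust-analyzer's actual rustfmt invocation:

    use std::io::Write;
    use std::process::{Command, Stdio};

    fn format_source(text: &str) -> std::io::Result<String> {
        let mut child = Command::new("rustfmt")
            .args(["--edition", "2021"])
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;
        // Send the document over stdin; dropping the handle closes the pipe so
        // rustfmt sees EOF and starts formatting.
        child.stdin.take().expect("stdin was piped").write_all(text.as_bytes())?;
        // This wait is unbounded: a huge file or a wedged rustfmt blocks the
        // calling thread, which is exactly why it must not be the main thread.
        let output = child.wait_with_output()?;
        Ok(String::from_utf8_lossy(&output.stdout).into_owned())
    }
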
@@ -163,7 +178,7 @@ impl<'a> RequestDispatcher<'a> {
         }
     }

-    fn on_with_thread_intent<R>(
+    fn on_with_thread_intent<const MAIN_POOL: bool, R>(
         &mut self,
         intent: ThreadIntent,
         f: fn(GlobalStateSnapshot, R::Params) -> Result<R::Result>,
@@ -178,17 +193,20 @@ impl<'a> RequestDispatcher<'a> {
             None => return self,
         };

-        self.global_state.task_pool.handle.spawn(intent, {
-            let world = self.global_state.snapshot();
-            move || {
-                let result = panic::catch_unwind(move || {
-                    let _pctx = stdx::panic_context::enter(panic_context);
-                    f(world, params)
-                });
-                match thread_result_to_response::<R>(req.id.clone(), result) {
-                    Ok(response) => Task::Response(response),
-                    Err(_) => Task::Retry(req),
-                }
+        let world = self.global_state.snapshot();
+        if MAIN_POOL {
+            &mut self.global_state.task_pool.handle
+        } else {
+            &mut self.global_state.fmt_pool.handle
+        }
+        .spawn(intent, move || {
+            let result = panic::catch_unwind(move || {
+                let _pctx = stdx::panic_context::enter(panic_context);
+                f(world, params)
+            });
+            match thread_result_to_response::<R>(req.id.clone(), result) {
+                Ok(response) => Task::Response(response),
+                Err(_) => Task::Retry(req),
             }
         });
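The shape of the new dispatch is easier to see with the LSP machinery stripped away. A hedged sketch of "pick a pool via the const generic, then run the job behind catch_unwind"; `Pool`, `State` and `dispatch` are illustrative stand-ins, not rust-analyzer's `TaskPool`/`Handle` types:

    use std::panic;

    struct Pool {
        name: &'static str,
    }

    impl Pool {
        fn spawn(&mut self, job: impl FnOnce() + Send + 'static) {
            // A real pool hands `job` to a worker thread; running it inline
            // keeps the sketch self-contained.
            println!("spawning on the {} pool", self.name);
            job();
        }
    }

    struct State {
        task_pool: Pool,
        fmt_pool: Pool,
    }

    impl State {
        fn dispatch<const MAIN_POOL: bool>(&mut self, job: impl FnOnce() + Send + 'static) {
            // Same branching as the diff: both arms yield `&mut Pool`, and the
            // chosen pool gets the job.
            let pool = if MAIN_POOL { &mut self.task_pool } else { &mut self.fmt_pool };
            pool.spawn(move || {
                // Shield the event loop from panicking handlers, like the
                // dispatcher's `panic::catch_unwind` above.
                if panic::catch_unwind(panic::AssertUnwindSafe(job)).is_err() {
                    eprintln!("job panicked; the real dispatcher turns this into a retry or an error response");
                }
            });
        }
    }

    fn main() {
        let mut state =
            State { task_pool: Pool { name: "task" }, fmt_pool: Pool { name: "fmt" } };
        state.dispatch::<true>(|| println!("semantic request"));
        state.dispatch::<false>(|| println!("formatting request"));
    }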


@@ -54,6 +54,7 @@ pub(crate) struct GlobalState {
     req_queue: ReqQueue,

     pub(crate) task_pool: Handle<TaskPool<Task>, Receiver<Task>>,
+    pub(crate) fmt_pool: Handle<TaskPool<Task>, Receiver<Task>>,

     pub(crate) config: Arc<Config>,
     pub(crate) config_errors: Option<ConfigError>,
@@ -151,6 +152,11 @@ impl GlobalState {
             let handle = TaskPool::new_with_threads(sender, config.main_loop_num_threads());
             Handle { handle, receiver }
         };
+        let fmt_pool = {
+            let (sender, receiver) = unbounded();
+            let handle = TaskPool::new_with_threads(sender, 1);
+            Handle { handle, receiver }
+        };

         let mut analysis_host = AnalysisHost::new(config.lru_parse_query_capacity());
         if let Some(capacities) = config.lru_query_capacities() {
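`TaskPool::new_with_threads(sender, 1)` gives the formatting pool exactly one thread. As a rough sketch of what such a pool looks like over a crossbeam channel; `MiniPool` is purely illustrative, and rust-analyzer's real `TaskPool` additionally takes a `ThreadIntent` per job, as the dispatcher diff above shows:

    use std::thread;

    use crossbeam_channel::{unbounded, Sender};

    struct MiniPool<T> {
        job_tx: Sender<Box<dyn FnOnce() -> T + Send>>,
    }

    impl<T: Send + 'static> MiniPool<T> {
        fn new(result_tx: Sender<T>, threads: usize) -> Self {
            let (job_tx, job_rx) = unbounded::<Box<dyn FnOnce() -> T + Send>>();
            for _ in 0..threads {
                let job_rx = job_rx.clone();
                let result_tx = result_tx.clone();
                thread::spawn(move || {
                    // Each worker pulls jobs until the pool (the job sender) is
                    // dropped, then the loop ends and the thread exits.
                    for job in job_rx {
                        let _ = result_tx.send(job());
                    }
                });
            }
            MiniPool { job_tx }
        }

        fn spawn(&self, job: impl FnOnce() -> T + Send + 'static) {
            let _ = self.job_tx.send(Box::new(job));
        }
    }

    fn main() {
        let (result_tx, result_rx) = unbounded();
        // One thread, mirroring `TaskPool::new_with_threads(sender, 1)` above.
        let fmt_pool = MiniPool::new(result_tx, 1);
        fmt_pool.spawn(|| "formatted text".to_string());
        println!("{}", result_rx.recv().unwrap());
    }
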
@@ -161,6 +167,7 @@ impl GlobalState {
             sender,
             req_queue: ReqQueue::default(),
             task_pool,
+            fmt_pool,
             loader,
             config: Arc::new(config.clone()),
             analysis_host,


@@ -18,12 +18,11 @@ use lsp_server::ErrorCode;
 use lsp_types::{
     CallHierarchyIncomingCall, CallHierarchyIncomingCallsParams, CallHierarchyItem,
     CallHierarchyOutgoingCall, CallHierarchyOutgoingCallsParams, CallHierarchyPrepareParams,
-    CodeLens, CompletionItem, DocumentFormattingParams, FoldingRange, FoldingRangeParams,
-    HoverContents, InlayHint, InlayHintParams, Location, LocationLink, Position,
-    PrepareRenameResponse, Range, RenameParams, SemanticTokensDeltaParams,
-    SemanticTokensFullDeltaResult, SemanticTokensParams, SemanticTokensRangeParams,
-    SemanticTokensRangeResult, SemanticTokensResult, SymbolInformation, SymbolTag,
-    TextDocumentIdentifier, Url, WorkspaceEdit,
+    CodeLens, CompletionItem, FoldingRange, FoldingRangeParams, HoverContents, InlayHint,
+    InlayHintParams, Location, LocationLink, Position, PrepareRenameResponse, Range, RenameParams,
+    SemanticTokensDeltaParams, SemanticTokensFullDeltaResult, SemanticTokensParams,
+    SemanticTokensRangeParams, SemanticTokensRangeResult, SemanticTokensResult, SymbolInformation,
+    SymbolTag, TextDocumentIdentifier, Url, WorkspaceEdit,
 };
 use project_model::{ManifestPath, ProjectWorkspace, TargetKind};
 use serde_json::json;
@@ -1077,7 +1076,7 @@ pub(crate) fn handle_references(

 pub(crate) fn handle_formatting(
     snap: GlobalStateSnapshot,
-    params: DocumentFormattingParams,
+    params: lsp_types::DocumentFormattingParams,
 ) -> Result<Option<Vec<lsp_types::TextEdit>>> {
     let _p = profile::span("handle_formatting");
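For reference, the handler's return type `Option<Vec<lsp_types::TextEdit>>` usually carries a single whole-document edit. A hedged sketch of that shape; `whole_document_edit` is illustrative, not the actual handler, and its end-position handling is deliberately simplistic:

    use lsp_types::{Position, Range, TextEdit};

    fn whole_document_edit(old_text: &str, formatted: String) -> Option<Vec<TextEdit>> {
        if old_text == formatted {
            // Nothing changed: answer the request with no edits.
            return None;
        }
        // One edit spanning the whole document. Using a line index one past the
        // last line is a simple (if blunt) way to cover everything; real handlers
        // compute the exact end position from a line index.
        let end = Position::new(old_text.lines().count() as u32, 0);
        Some(vec![TextEdit { range: Range::new(Position::new(0, 0), end), new_text: formatted }])
    }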


@@ -175,6 +175,9 @@ impl GlobalState {
                 msg.ok().map(Event::Lsp),

             recv(self.task_pool.receiver) -> task =>
+                Some(Event::Task(task.unwrap())),
+
+            recv(self.fmt_pool.receiver) -> task =>
                 Some(Event::Task(task.unwrap())),

             recv(self.loader.receiver) -> task =>
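The main loop multiplexes both pools with a crossbeam-channel-style `select!`, and results from either one arrive as the same `Event::Task`. A small self-contained sketch of that pattern, with illustrative channels rather than rust-analyzer's `Event` type:

    use crossbeam_channel::{select, unbounded};

    enum Event {
        Task(String),
    }

    fn main() {
        let (task_tx, task_rx) = unbounded::<String>();
        let (fmt_tx, fmt_rx) = unbounded::<String>();

        task_tx.send("semantic job finished".into()).unwrap();
        fmt_tx.send("rustfmt job finished".into()).unwrap();

        // Both receivers map into the same Event variant, so the consumer does
        // not care which pool produced the result.
        for _ in 0..2 {
            let event = select! {
                recv(task_rx) -> msg => msg.ok().map(Event::Task),
                recv(fmt_rx) -> msg => msg.ok().map(Event::Task),
            };
            if let Some(Event::Task(msg)) = event {
                println!("{msg}");
            }
        }
    }
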
@@ -678,6 +681,12 @@ impl GlobalState {
             .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)
             .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)
             .on_sync::<lsp_ext::OnTypeFormatting>(handlers::handle_on_type_formatting)
+            // Formatting should be done immediately as the editor might wait on it, but we can't
+            // put it on the main thread as we do not want the main thread to block on rustfmt.
+            // So we have an extra thread just for formatting requests to make sure it gets handled
+            // as fast as possible.
+            .on_fmt_thread::<lsp_types::request::Formatting>(handlers::handle_formatting)
+            .on_fmt_thread::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
             // We cant run latency-sensitive request handlers which do semantic
             // analysis on the main thread because that would block other
             // requests. Instead, we run these request handlers on higher priority
@@ -695,14 +704,6 @@ impl GlobalState {
             .on_latency_sensitive::<lsp_types::request::SemanticTokensRangeRequest>(
                 handlers::handle_semantic_tokens_range,
             )
-            // Formatting is not caused by the user typing,
-            // but it does qualify as latency-sensitive
-            // because a delay before formatting is applied
-            // can be confusing for the user.
-            .on_latency_sensitive::<lsp_types::request::Formatting>(handlers::handle_formatting)
-            .on_latency_sensitive::<lsp_types::request::RangeFormatting>(
-                handlers::handle_range_formatting,
-            )
             // All other request handlers
             .on::<lsp_ext::FetchDependencyList>(handlers::fetch_dependency_list)
             .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)