[ty] Add progress reporting to workspace diagnostics (#19616)

Micha Reiser 2025-07-30 12:27:34 +02:00 committed by GitHub
parent 2a5ace6e55
commit 6237ecb4db
11 changed files with 450 additions and 79 deletions

View file

@ -18,13 +18,16 @@ use std::panic::{PanicHookInfo, RefUnwindSafe};
use std::sync::Arc;
mod api;
mod lazy_work_done_progress;
mod main_loop;
mod schedule;
use crate::session::client::Client;
pub(crate) use api::Error;
pub(crate) use api::publish_settings_diagnostics;
pub(crate) use main_loop::{Action, ConnectionSender, Event, MainLoopReceiver, MainLoopSender};
pub(crate) use main_loop::{
Action, ConnectionSender, Event, MainLoopReceiver, MainLoopSender, SendRequest,
};
pub(crate) type Result<T> = std::result::Result<T, api::Error>;
pub struct Server {
@ -198,7 +201,9 @@ impl Server {
inter_file_dependencies: true,
// TODO: Dynamically register for workspace diagnostics.
workspace_diagnostics: diagnostic_mode.is_workspace(),
..Default::default()
work_done_progress_options: WorkDoneProgressOptions {
work_done_progress: Some(diagnostic_mode.is_workspace()),
},
})),
text_document_sync: Some(TextDocumentSyncCapability::Options(
TextDocumentSyncOptions {

View file

@ -1,13 +1,3 @@
use std::collections::BTreeMap;
use crate::server::Result;
use crate::server::api::diagnostics::{Diagnostics, to_lsp_diagnostic};
use crate::server::api::traits::{
BackgroundRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::session::SessionSnapshot;
use crate::session::client::Client;
use crate::system::file_to_url;
use lsp_types::request::WorkspaceDiagnosticRequest;
use lsp_types::{
FullDocumentDiagnosticReport, UnchangedDocumentDiagnosticReport, Url,
@ -15,6 +5,20 @@ use lsp_types::{
WorkspaceDocumentDiagnosticReport, WorkspaceFullDocumentDiagnosticReport,
WorkspaceUnchangedDocumentDiagnosticReport,
};
use ruff_db::files::File;
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use ty_project::ProgressReporter;
use crate::server::Result;
use crate::server::api::diagnostics::{Diagnostics, to_lsp_diagnostic};
use crate::server::api::traits::{
BackgroundRequestHandler, RequestHandler, RetriableRequestHandler,
};
use crate::server::lazy_work_done_progress::LazyWorkDoneProgress;
use crate::session::SessionSnapshot;
use crate::session::client::Client;
use crate::system::file_to_url;
pub(crate) struct WorkspaceDiagnosticRequestHandler;
@ -25,7 +29,7 @@ impl RequestHandler for WorkspaceDiagnosticRequestHandler {
impl BackgroundRequestHandler for WorkspaceDiagnosticRequestHandler {
fn run(
snapshot: SessionSnapshot,
_client: &Client,
client: &Client,
params: WorkspaceDiagnosticParams,
) -> Result<WorkspaceDiagnosticReportResult> {
let index = snapshot.index();
@ -44,10 +48,24 @@ impl BackgroundRequestHandler for WorkspaceDiagnosticRequestHandler {
.map(|prev| (prev.uri, prev.value))
.collect();
// Use the work done progress token from the client request, if provided
// Note: neither VS Code nor Zed currently supports this,
// see https://github.com/microsoft/vscode-languageserver-node/issues/528
// That's why we fall back to server-initiated progress if no token is provided.
let work_done_progress = LazyWorkDoneProgress::new(
client,
params.work_done_progress_params.work_done_token,
"Checking",
snapshot.resolved_client_capabilities(),
);
// Collect all diagnostics from all projects with their database references
let mut items = Vec::new();
for db in snapshot.projects() {
let diagnostics = db.check();
let diagnostics = db.check_with_reporter(
&mut WorkspaceDiagnosticsProgressReporter::new(work_done_progress.clone()),
);
// Group diagnostics by URL
let mut diagnostics_by_url: BTreeMap<Url, Vec<_>> = BTreeMap::default();
@ -152,3 +170,55 @@ impl RetriableRequestHandler for WorkspaceDiagnosticRequestHandler {
}
}
}
struct WorkspaceDiagnosticsProgressReporter {
total_files: usize,
checked_files: AtomicUsize,
work_done: LazyWorkDoneProgress,
}
impl WorkspaceDiagnosticsProgressReporter {
fn new(work_done: LazyWorkDoneProgress) -> Self {
Self {
total_files: 0,
checked_files: AtomicUsize::new(0),
work_done,
}
}
fn report_progress(&self) {
let checked = self.checked_files.load(Ordering::Relaxed);
let total = self.total_files;
#[allow(clippy::cast_possible_truncation)]
let percentage = if total > 0 {
Some((checked * 100 / total) as u32)
} else {
None
};
self.work_done
.report_progress(format!("{checked}/{total} files"), percentage);
if checked == total {
self.work_done
.set_finish_message(format!("Checked {total} files"));
}
}
}
impl ProgressReporter for WorkspaceDiagnosticsProgressReporter {
fn set_files(&mut self, files: usize) {
self.total_files += files;
self.report_progress();
}
fn report_file(&self, _file: &File) {
let checked = self.checked_files.fetch_add(1, Ordering::Relaxed) + 1;
if checked % 10 == 0 || checked == self.total_files {
// Report progress every 10 files or when all files are checked
self.report_progress();
}
}
}
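
The reporter above implements `ty_project::ProgressReporter`. Below is a minimal sketch of the call protocol it assumes from `check_with_reporter` (the total is announced once, then every checked file is reported); the `drive` helper is purely illustrative and not part of ty's API:

use ruff_db::files::File;
use ty_project::ProgressReporter;

// Assumed call order: `set_files` establishes the denominator for the percentage,
// then `report_file` runs once per checked file (potentially from worker threads,
// which is why the reporter above uses `&self` and an atomic counter).
fn drive(reporter: &mut impl ProgressReporter, files: &[File]) {
    reporter.set_files(files.len());
    for file in files {
        // The reporter above throttles `$/progress` to every 10th file plus the
        // final file so the client isn't flooded with notifications.
        reporter.report_file(file);
    }
}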

View file

@ -0,0 +1,167 @@
use crate::session::ResolvedClientCapabilities;
use crate::session::client::Client;
use lsp_types::request::WorkDoneProgressCreate;
use lsp_types::{
ProgressParams, ProgressParamsValue, ProgressToken, WorkDoneProgress, WorkDoneProgressBegin,
WorkDoneProgressCreateParams, WorkDoneProgressEnd, WorkDoneProgressReport,
};
use std::fmt::Display;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
static SERVER_WORK_DONE_TOKENS: AtomicUsize = AtomicUsize::new(0);
/// A [work done progress][work-done-progress] that uses the client-provided token if available,
/// but falls back to a server-initiated progress if supported by the client.
///
/// The LSP specification supports client- and server-initiated work done progress reporting:
/// * Client: Many requests have a work done progress token or extend `WorkDoneProgressParams`.
/// For those requests, a server can ask clients to start a work done progress report by
/// setting the work done capability for that request in the server's capabilities during initialize.
/// However, as of today (July 2025), VS Code and Zed don't support client-initiated work done progress
/// tokens except for the `initialize` request (<https://github.com/microsoft/vscode-languageserver-node/issues/528>).
/// * Server: A server can initiate a work done progress report by sending a `WorkDoneProgressCreate` request
/// with a token, which the server then uses to report progress to the client (except during `initialize`).
///
/// This work done progress supports both clients that provide a work done progress token in their requests
/// and clients that do not. If the client does not provide a token, the server will
/// initiate a work done progress report using a unique string token.
///
/// ## Server Initiated Progress
///
/// The implementation initiates a work done progress report lazily when no token is provided in the request.
/// This creation happens asynchronously, and the LSP specification requires that a server only
/// sends `$/progress` notifications with that token if the create request was successful (no error):
///
/// > code and message set in case an exception happens during the 'window/workDoneProgress/create' request.
/// > In case an error occurs a server must not send any progress notification
/// > using the token provided in the WorkDoneProgressCreateParams.
///
/// The implementation doesn't block on the client's response to the create request because it would be
/// unfortunate to delay a client request only so that ty can show a progress bar. Therefore, the progress
/// reporting will not be available immediately.
///
/// [work-done-progress]: https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#workDoneProgress
#[derive(Clone)]
pub(super) struct LazyWorkDoneProgress {
inner: Arc<Inner>,
}
impl LazyWorkDoneProgress {
pub(super) fn new(
client: &Client,
request_token: Option<ProgressToken>,
title: &str,
capabilities: ResolvedClientCapabilities,
) -> Self {
if let Some(token) = &request_token {
Self::send_begin(client, token.clone(), title.to_string());
}
let is_server_initiated = request_token.is_none();
let once_token = std::sync::OnceLock::new();
if let Some(token) = request_token {
// SAFETY: The token is guaranteed to be unset because the `OnceLock` was just created above.
once_token.set(token).unwrap();
}
let work_done = Self {
inner: Arc::new(Inner {
token: once_token,
finish_message: std::sync::Mutex::default(),
client: client.clone(),
}),
};
if is_server_initiated && capabilities.supports_work_done_progress() {
// Use a string token because Zed does not support numeric tokens
let token = ProgressToken::String(format!(
"ty-{}",
SERVER_WORK_DONE_TOKENS.fetch_add(1, Ordering::Relaxed)
));
let work_done = work_done.clone();
let title = title.to_string();
client.send_deferred_request::<WorkDoneProgressCreate>(
WorkDoneProgressCreateParams {
token: token.clone(),
},
move |client, ()| {
Self::send_begin(client, token.clone(), title);
// SAFETY: We only take this branch if `request_token` was `None`
// and we only issue a single request (without retry).
work_done.inner.token.set(token).unwrap();
},
);
}
work_done
}
pub(super) fn set_finish_message(&self, message: String) {
let mut finish_message = self.inner.finish_message.lock().unwrap();
*finish_message = Some(message);
}
fn send_begin(client: &Client, token: ProgressToken, title: String) {
client.send_notification::<lsp_types::notification::Progress>(ProgressParams {
token,
value: ProgressParamsValue::WorkDone(WorkDoneProgress::Begin(WorkDoneProgressBegin {
title,
cancellable: Some(false),
message: None,
percentage: Some(0),
})),
});
}
/// Sends a progress report with the given message and optional percentage.
pub(super) fn report_progress(&self, message: impl Display, percentage: Option<u32>) {
let Some(token) = self.inner.token.get() else {
return;
};
self.inner
.client
.send_notification::<lsp_types::notification::Progress>(ProgressParams {
token: token.clone(),
value: ProgressParamsValue::WorkDone(WorkDoneProgress::Report(
WorkDoneProgressReport {
cancellable: Some(false),
message: Some(message.to_string()),
percentage,
},
)),
});
}
}
struct Inner {
token: std::sync::OnceLock<ProgressToken>,
finish_message: std::sync::Mutex<Option<String>>,
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
let Some(token) = self.token.get() else {
return;
};
let finish_message = self
.finish_message
.lock()
.ok()
.and_then(|mut message| message.take());
self.client
.send_notification::<lsp_types::notification::Progress>(ProgressParams {
token: token.clone(),
value: ProgressParamsValue::WorkDone(WorkDoneProgress::End(WorkDoneProgressEnd {
message: finish_message,
})),
});
}
}
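
To show how the pieces above fit together, here is a hedged usage sketch from the perspective of a background request handler; the function and its messages are illustrative, and only the `LazyWorkDoneProgress` calls mirror the API defined in this file:

use lsp_types::ProgressToken;

fn report_check_progress(
    client: &Client,
    request_token: Option<ProgressToken>,
    capabilities: ResolvedClientCapabilities,
) {
    // With a client-provided token, `Begin` is sent immediately. Without one, a
    // `window/workDoneProgress/create` request is queued and reporting only starts
    // once (and if) the client accepts it.
    let progress = LazyWorkDoneProgress::new(client, request_token, "Checking", capabilities);

    for step in 1..=4_u32 {
        // `Report` notifications with a message and an optional percentage.
        progress.report_progress(format!("{step}/4 steps"), Some(step * 25));
    }

    // Attached to the `End` notification, which is sent when the last clone is dropped.
    progress.set_finish_message("Checked 4 steps".to_string());
}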

View file

@ -1,7 +1,7 @@
use crate::server::schedule::Scheduler;
use crate::server::{Server, api};
use crate::session::ClientOptions;
use crate::session::client::Client;
use crate::session::client::{Client, ClientResponseHandler};
use anyhow::anyhow;
use crossbeam::select;
use lsp_server::Message;
@ -87,7 +87,7 @@ impl Server {
.outgoing_mut()
.complete(&response.id)
{
handler(&client, response);
handler.handle_response(&client, response);
} else {
tracing::error!(
"Received a response with ID {}, which was not expected",
@ -139,6 +139,9 @@ impl Server {
);
}
}
Action::SendRequest(request) => client.send_request_raw(&self.session, request),
Action::InitializeWorkspaces(workspaces_with_options) => {
self.session
.initialize_workspaces(workspaces_with_options, &client);
@ -300,6 +303,9 @@ pub(crate) enum Action {
/// Retry a request that previously failed due to a salsa cancellation.
RetryRequest(lsp_server::Request),
/// Send a request from the server to the client.
SendRequest(SendRequest),
/// Initialize the workspace after the server received
/// the options from the client.
InitializeWorkspaces(Vec<(Url, ClientOptions)>),
@ -312,3 +318,18 @@ pub(crate) enum Event {
Action(Action),
}
pub(crate) struct SendRequest {
pub(crate) method: String,
pub(crate) params: serde_json::Value,
pub(crate) response_handler: ClientResponseHandler,
}
impl std::fmt::Debug for SendRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SendRequest")
.field("method", &self.method)
.field("params", &self.params)
.finish_non_exhaustive()
}
}
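
`Action::SendRequest` exists because background tasks hold a `Client` but not the `Session` that owns the pending-request queue. The following self-contained sketch mirrors that hand-off with plain std channels; every name in it is an illustrative stand-in, not a ty API:

use std::collections::HashMap;
use std::sync::mpsc;

type ResponseHandler = Box<dyn FnOnce(String) + Send>;

struct QueuedRequest {
    method: String,
    params: String,
    response_handler: ResponseHandler,
}

fn main() {
    let (to_main_loop, main_loop_rx) = mpsc::channel::<QueuedRequest>();

    // Background task: it describes the request and what to do with the response,
    // but it cannot register a request id because it has no access to the queue.
    std::thread::spawn(move || {
        to_main_loop
            .send(QueuedRequest {
                method: "window/workDoneProgress/create".to_string(),
                params: r#"{"token":"ty-0"}"#.to_string(),
                response_handler: Box::new(|response| println!("client answered: {response}")),
            })
            .unwrap();
    });

    // Main loop: owns the outgoing-request queue, assigns the id, sends the request,
    // and dispatches the stored handler once the matching response arrives.
    let mut outgoing: HashMap<u64, ResponseHandler> = HashMap::new();
    let request = main_loop_rx.recv().unwrap();
    let id = 0_u64; // the real server allocates ids via `request_queue().outgoing()`
    outgoing.insert(id, request.response_handler);
    println!("-> {} (id {id}): {}", request.method, request.params);

    // Simulate the client's response to request `id`.
    if let Some(handler) = outgoing.remove(&id) {
        handler("null".to_string());
    }
}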

View file

@ -1,10 +1,5 @@
//! Data model, state management, and configuration resolution.
use std::collections::{BTreeMap, VecDeque};
use std::ops::{Deref, DerefMut};
use std::panic::RefUnwindSafe;
use std::sync::Arc;
use anyhow::{Context, anyhow};
use index::DocumentQueryError;
use lsp_server::Message;
@ -15,6 +10,10 @@ use options::GlobalOptions;
use ruff_db::Db;
use ruff_db::files::File;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use std::collections::{BTreeMap, VecDeque};
use std::ops::{Deref, DerefMut};
use std::panic::RefUnwindSafe;
use std::sync::Arc;
use ty_project::metadata::Options;
use ty_project::watch::ChangeEvent;
use ty_project::{ChangeResult, Db as _, ProjectDatabase, ProjectMetadata};
@ -339,7 +338,6 @@ impl Session {
client: &Client,
) {
assert!(!self.workspaces.all_initialized());
for (url, options) in workspace_settings {
tracing::debug!("Initializing workspace `{url}`");
@ -453,6 +451,7 @@ impl Session {
.collect(),
index: self.index.clone().unwrap(),
position_encoding: self.position_encoding,
resolved_client_capabilities: self.resolved_client_capabilities,
}
}
@ -643,6 +642,7 @@ pub(crate) struct SessionSnapshot {
projects: Vec<ProjectDatabase>,
index: Arc<Index>,
position_encoding: PositionEncoding,
resolved_client_capabilities: ResolvedClientCapabilities,
}
impl SessionSnapshot {
@ -657,6 +657,10 @@ impl SessionSnapshot {
pub(crate) fn position_encoding(&self) -> PositionEncoding {
self.position_encoding
}
pub(crate) fn resolved_client_capabilities(&self) -> ResolvedClientCapabilities {
self.resolved_client_capabilities
}
}
#[derive(Debug, Default)]

View file

@ -17,6 +17,7 @@ bitflags::bitflags! {
const SIGNATURE_LABEL_OFFSET_SUPPORT = 1 << 8;
const SIGNATURE_ACTIVE_PARAMETER_SUPPORT = 1 << 9;
const HIERARCHICAL_DOCUMENT_SYMBOL_SUPPORT = 1 << 10;
const WORK_DONE_PROGRESS = 1 << 11;
}
}
@ -76,6 +77,11 @@ impl ResolvedClientCapabilities {
self.contains(Self::HIERARCHICAL_DOCUMENT_SYMBOL_SUPPORT)
}
/// Returns `true` if the client supports work done progress.
pub(crate) const fn supports_work_done_progress(self) -> bool {
self.contains(Self::WORK_DONE_PROGRESS)
}
pub(super) fn new(client_capabilities: &ClientCapabilities) -> Self {
let mut flags = Self::empty();
@ -191,6 +197,15 @@ impl ResolvedClientCapabilities {
flags |= Self::HIERARCHICAL_DOCUMENT_SYMBOL_SUPPORT;
}
if client_capabilities
.window
.as_ref()
.and_then(|window| window.work_done_progress)
.unwrap_or_default()
{
flags |= Self::WORK_DONE_PROGRESS;
}
flags
}
}
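
Because `ResolvedClientCapabilities::new` is only reachable from within the session module, the following is just an illustration of how the new flag is derived from the client's `initialize` capabilities:

use lsp_types::{ClientCapabilities, WindowClientCapabilities};

fn sketch() {
    // Clients that can render server-initiated progress advertise
    // `window.workDoneProgress: true` during `initialize`.
    let client_capabilities = ClientCapabilities {
        window: Some(WindowClientCapabilities {
            work_done_progress: Some(true),
            ..Default::default()
        }),
        ..Default::default()
    };

    let resolved = ResolvedClientCapabilities::new(&client_capabilities);
    // Server-initiated progress (`window/workDoneProgress/create`) is only attempted
    // when this returns `true`.
    assert!(resolved.supports_work_done_progress());
}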

View file

@ -1,14 +1,12 @@
use crate::Session;
use crate::server::{Action, ConnectionSender};
use crate::server::{Action, ConnectionSender, SendRequest};
use crate::server::{Event, MainLoopSender};
use lsp_server::{ErrorCode, Message, Notification, RequestId, ResponseError};
use serde_json::Value;
use std::any::TypeId;
use std::fmt::Display;
pub(crate) type ClientResponseHandler = Box<dyn FnOnce(&Client, lsp_server::Response) + Send>;
#[derive(Debug)]
#[derive(Debug, Clone)]
pub(crate) struct Client {
/// Channel to send messages back to the main loop.
main_loop_sender: MainLoopSender,
@ -33,12 +31,9 @@ impl Client {
/// The `response_handler` will be dispatched as soon as the client response
/// is processed on the main-loop. The handler always runs on the main-loop thread.
///
/// # Note
/// This method takes a `session` so that we can register the pending-request
/// and send the response directly to the client. If this ever becomes too limiting (because we
/// need to send a request from somewhere where we don't have access to session), consider introducing
/// a new `send_deferred_request` method that doesn't take a session and instead sends
/// an `Action` to the main loop to send the request (the main loop has always access to session).
/// Use [`Self::send_deferred_request`] if you are in a background task
/// where you don't have access to the session. Note that the request
/// won't be sent immediately; it is queued up in the main loop instead.
pub(crate) fn send_request<R>(
&self,
session: &Session,
@ -47,63 +42,56 @@ impl Client {
) where
R: lsp_types::request::Request,
{
let response_handler = Box::new(move |client: &Client, response: lsp_server::Response| {
let _span =
tracing::debug_span!("client_response", id=%response.id, method = R::METHOD)
.entered();
self.send_request_raw(
session,
SendRequest {
method: R::METHOD.to_string(),
params: serde_json::to_value(params).expect("Params to be serializable"),
response_handler: ClientResponseHandler::for_request::<R>(response_handler),
},
);
}
match (response.error, response.result) {
(Some(err), _) => {
tracing::error!(
"Got an error from the client (code {code}, method {method}): {message}",
code = err.code,
message = err.message,
method = R::METHOD
);
}
(None, Some(response)) => match serde_json::from_value(response) {
Ok(response) => response_handler(client, response),
Err(error) => {
tracing::error!(
"Failed to deserialize client response (method={method}): {error}",
method = R::METHOD
);
}
},
(None, None) => {
if TypeId::of::<R::Result>() == TypeId::of::<()>() {
// We can't call `response_handler(())` directly here, but
// since we _know_ the type expected is `()`, we can use
// `from_value(Value::Null)`. `R::Result` implements `DeserializeOwned`,
// so this branch works in the general case but we'll only
// hit it if the concrete type is `()`, so the `unwrap()` is safe here.
response_handler(client, serde_json::from_value(Value::Null).unwrap());
} else {
tracing::error!(
"Invalid client response: did not contain a result or error (method={method})",
method = R::METHOD
);
}
}
}
});
/// Sends a request of kind `R` to the client, with associated parameters.
///
/// The request isn't sent immediately, but rather queued up in the main loop.
/// The `response_handler` will be dispatched as soon as the client response
/// is processed on the main-loop. The handler always runs on the main-loop thread.
///
/// Use [`Self::send_request`] if you are in a foreground task and have access to the session.
pub(crate) fn send_deferred_request<R>(
&self,
params: R::Params,
response_handler: impl FnOnce(&Client, R::Result) + Send + 'static,
) where
R: lsp_types::request::Request,
{
self.main_loop_sender
.send(Event::Action(Action::SendRequest(SendRequest {
method: R::METHOD.to_string(),
params: serde_json::to_value(params).expect("Params to be serializable"),
response_handler: ClientResponseHandler::for_request::<R>(response_handler),
})))
.unwrap();
}
pub(crate) fn send_request_raw(&self, session: &Session, request: SendRequest) {
let id = session
.request_queue()
.outgoing()
.register(response_handler);
.register(request.response_handler);
if let Err(err) = self
.client_sender
.send(Message::Request(lsp_server::Request {
id,
method: R::METHOD.to_string(),
params: serde_json::to_value(params).expect("Params to be serializable"),
method: request.method.clone(),
params: request.params,
}))
{
tracing::error!(
"Failed to send request `{}` because the client sender is closed: {err}",
R::METHOD
request.method
);
}
}
@ -250,3 +238,62 @@ impl Client {
}
}
}
/// Type erased handler for client responses.
#[allow(clippy::type_complexity)]
pub(crate) struct ClientResponseHandler(Box<dyn FnOnce(&Client, lsp_server::Response) + Send>);
impl ClientResponseHandler {
fn for_request<R>(response_handler: impl FnOnce(&Client, R::Result) + Send + 'static) -> Self
where
R: lsp_types::request::Request,
{
Self(Box::new(
move |client: &Client, response: lsp_server::Response| {
let _span =
tracing::debug_span!("client_response", id=%response.id, method = R::METHOD)
.entered();
match (response.error, response.result) {
(Some(err), _) => {
tracing::error!(
"Got an error from the client (code {code}, method {method}): {message}",
code = err.code,
message = &err.message,
method = R::METHOD
);
}
(None, Some(response)) => match serde_json::from_value(response) {
Ok(response) => response_handler(client, response),
Err(error) => {
tracing::error!(
"Failed to deserialize client response (method={method}): {error}",
method = R::METHOD
);
}
},
(None, None) => {
if TypeId::of::<R::Result>() == TypeId::of::<()>() {
// We can't call `response_handler(())` directly here, but
// since we _know_ the type expected is `()`, we can use
// `from_value(Value::Null)`. `R::Result` implements `DeserializeOwned`,
// so this branch works in the general case but we'll only
// hit it if the concrete type is `()`, so the `unwrap()` is safe here.
response_handler(client, serde_json::from_value(Value::Null).unwrap());
} else {
tracing::error!(
"Invalid client response: did not contain a result or error (method={method})",
method = R::METHOD
);
}
}
}
},
))
}
pub(crate) fn handle_response(self, client: &Client, response: lsp_server::Response) {
let handler = self.0;
handler(client, response);
}
}
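
For completeness, a hedged sketch of calling the new `send_deferred_request` from a background task; the wrapper function is illustrative, while the request type and handler shape match the API introduced in this commit:

use lsp_types::request::WorkDoneProgressCreate;
use lsp_types::{ProgressToken, WorkDoneProgressCreateParams};

fn create_progress_from_background(client: &Client) {
    // Unlike `send_request`, no `Session` is needed here: the request is wrapped in an
    // `Action::SendRequest` and actually sent by the main loop, which owns the queue.
    client.send_deferred_request::<WorkDoneProgressCreate>(
        WorkDoneProgressCreateParams {
            token: ProgressToken::String("ty-example".to_string()),
        },
        |_client, ()| {
            // Runs on the main-loop thread once the client has responded; only then may
            // the server send `$/progress` notifications that use this token.
            tracing::debug!("client accepted the progress token");
        },
    );
}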