mirror of
https://github.com/astral-sh/ruff.git
synced 2025-10-01 06:11:43 +00:00
[red-knot] Implement basic LSP server (#12624)
## Summary This PR adds basic LSP implementation for the Red Knot project. This is basically a fork of the existing `ruff_server` crate into a `red_knot_server` crate. The following are the main differences: 1. The `Session` stores a map from workspace root to the corresponding Red Knot database (`RootDatabase`). 2. The database is initialized with the newly implemented `LSPSystem` (implementation of `System` trait) 3. The `LSPSystem` contains the server index corresponding to each workspace and an underlying OS system implementation. For certain methods, the system first checks if there's an open document in LSP system and returns the information from that. Otherwise, it falls back to the OS system to get that information. These methods are `path_metadata`, `read_to_string` and `read_to_notebook` 4. Add `as_any_mut` method for `System` **Why fork?** Forking allows us to experiment with the functionalities that are specific to Red Knot. The architecture is completely different and so the requirements for an LSP implementation are different as well. For example, Red Knot only supports a single workspace, so the LSP system needs to map the multi-workspace support to each Red Knot instance. In the end, the server code isn't too big, it will be easier to implement Red Knot specific functionality without worrying about existing server limitations and it shouldn't be difficult to port the existing server. ## Review Most of the server files hasn't been changed. I'm going to list down the files that have been changed along with highlight the specific part of the file that's changed from the existing server code. 
Changed files: * Red Knot CLI implementation: https://github.com/astral-sh/ruff/pull/12624/files#diff-579596339a29d3212a641232e674778c339b446de33b890c7fdad905b5eb50e1 * In https://github.com/astral-sh/ruff/pull/12624/files#diff-b9a9041a8a2bace014bf3687c3ef0512f25e0541f112fad6131b14242f408db6, server capabilities have been updated, dynamic capability registration is removed * In https://github.com/astral-sh/ruff/pull/12624/files#diff-b9a9041a8a2bace014bf3687c3ef0512f25e0541f112fad6131b14242f408db6, the API for `clear_diagnostics` now take in a `Url` instead of `DocumentQuery` as the document version doesn't matter when clearing diagnostics after a document is closed * [`did_close`](https://github.com/astral-sh/ruff/pull/12624/files#diff-9271370102a6f3be8defaca40c82485b0048731942520b491a3bdd2ee0e25493), [`did_close_notebook`](https://github.com/astral-sh/ruff/pull/12624/files#diff-96fb53ffb12c1694356e17313e4bb37b3f0931e887878b5d7c896c19ff60283b), [`did_open`](https://github.com/astral-sh/ruff/pull/12624/files#diff-60e852cf1aa771e993131cabf98eb4c467963a8328f10eccdb43b3e8f0f1fb12), [`did_open_notebook`](https://github.com/astral-sh/ruff/pull/12624/files#diff-ac356eb5e36c3b2c1c135eda9dfbcab5c12574d1cb77c71f7da8dbcfcfb2d2f1) are updated to open / close file from the corresponding Red Knot workspace * The [diagnostic handler](https://github.com/astral-sh/ruff/pull/12624/files#diff-4475f318fd0290d0292834569a7df5699debdcc0a453b411b8c3d329f1b879d9) is updated to request diagnostics from Red Knot * The [`Session::new`] method in https://github.com/astral-sh/ruff/pull/12624/files#diff-55c96201296200c1cab37c8b0407b6c733381374b94be7ae50563bfe95264e4d is updated to construct the Red Knot databases for each workspace. 
It also contains the `index_mut` and `MutIndexGuard` implementation * And, `LSPSystem` implementation is in https://github.com/astral-sh/ruff/pull/12624/files#diff-4ed62bd359c43b0bf1a13f04349dcd954966934bb8d544de7813f974182b489e ## Test Plan First, configure VS Code to use the `red_knot` binary 1. Build the `red_knot` binary by `cargo build` 2. Update the VS Code extension to specify the path to this binary ```json { "ruff.path": ["/path/to/ruff/target/debug/red_knot"] } ``` 3. Restart VS Code Now, open a file containing red-knot specific diagnostics, close the file and validate that diagnostics disappear.
This commit is contained in:
parent
d2c627efb3
commit
e91a0fe94a
44 changed files with 3912 additions and 1 deletions
27
Cargo.lock
generated
27
Cargo.lock
generated
|
@ -1879,6 +1879,7 @@ dependencies = [
|
||||||
"filetime",
|
"filetime",
|
||||||
"rayon",
|
"rayon",
|
||||||
"red_knot_module_resolver",
|
"red_knot_module_resolver",
|
||||||
|
"red_knot_server",
|
||||||
"red_knot_workspace",
|
"red_knot_workspace",
|
||||||
"ruff_db",
|
"ruff_db",
|
||||||
"salsa",
|
"salsa",
|
||||||
|
@ -1928,6 +1929,32 @@ dependencies = [
|
||||||
"tracing",
|
"tracing",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "red_knot_server"
|
||||||
|
version = "0.0.0"
|
||||||
|
dependencies = [
|
||||||
|
"anyhow",
|
||||||
|
"crossbeam",
|
||||||
|
"jod-thread",
|
||||||
|
"libc",
|
||||||
|
"lsp-server",
|
||||||
|
"lsp-types",
|
||||||
|
"red_knot_workspace",
|
||||||
|
"ruff_db",
|
||||||
|
"ruff_linter",
|
||||||
|
"ruff_notebook",
|
||||||
|
"ruff_python_ast",
|
||||||
|
"ruff_source_file",
|
||||||
|
"ruff_text_size",
|
||||||
|
"rustc-hash 2.0.0",
|
||||||
|
"salsa",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
"shellexpand",
|
||||||
|
"tracing",
|
||||||
|
"tracing-subscriber",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "red_knot_wasm"
|
name = "red_knot_wasm"
|
||||||
version = "0.0.0"
|
version = "0.0.0"
|
||||||
|
|
|
@ -37,6 +37,7 @@ ruff_workspace = { path = "crates/ruff_workspace" }
|
||||||
|
|
||||||
red_knot_module_resolver = { path = "crates/red_knot_module_resolver" }
|
red_knot_module_resolver = { path = "crates/red_knot_module_resolver" }
|
||||||
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
|
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
|
||||||
|
red_knot_server = { path = "crates/red_knot_server" }
|
||||||
red_knot_workspace = { path = "crates/red_knot_workspace" }
|
red_knot_workspace = { path = "crates/red_knot_workspace" }
|
||||||
|
|
||||||
aho-corasick = { version = "1.1.3" }
|
aho-corasick = { version = "1.1.3" }
|
||||||
|
|
|
@ -14,6 +14,7 @@ license.workspace = true
|
||||||
[dependencies]
|
[dependencies]
|
||||||
red_knot_module_resolver = { workspace = true }
|
red_knot_module_resolver = { workspace = true }
|
||||||
red_knot_workspace = { workspace = true }
|
red_knot_workspace = { workspace = true }
|
||||||
|
red_knot_server = { workspace = true }
|
||||||
|
|
||||||
ruff_db = { workspace = true, features = ["os", "cache"] }
|
ruff_db = { workspace = true, features = ["os", "cache"] }
|
||||||
|
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
use std::num::NonZeroUsize;
|
||||||
use std::sync::Mutex;
|
use std::sync::Mutex;
|
||||||
|
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
|
@ -29,6 +30,9 @@ mod cli;
|
||||||
)]
|
)]
|
||||||
#[command(version)]
|
#[command(version)]
|
||||||
struct Args {
|
struct Args {
|
||||||
|
#[command(subcommand)]
|
||||||
|
pub(crate) command: Command,
|
||||||
|
|
||||||
#[arg(
|
#[arg(
|
||||||
long,
|
long,
|
||||||
help = "Changes the current working directory.",
|
help = "Changes the current working directory.",
|
||||||
|
@ -65,6 +69,11 @@ struct Args {
|
||||||
watch: bool,
|
watch: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, clap::Subcommand)]
|
||||||
|
pub enum Command {
|
||||||
|
Server,
|
||||||
|
}
|
||||||
|
|
||||||
#[allow(
|
#[allow(
|
||||||
clippy::print_stdout,
|
clippy::print_stdout,
|
||||||
clippy::unnecessary_wraps,
|
clippy::unnecessary_wraps,
|
||||||
|
@ -73,6 +82,7 @@ struct Args {
|
||||||
)]
|
)]
|
||||||
pub fn main() -> anyhow::Result<()> {
|
pub fn main() -> anyhow::Result<()> {
|
||||||
let Args {
|
let Args {
|
||||||
|
command,
|
||||||
current_directory,
|
current_directory,
|
||||||
custom_typeshed_dir,
|
custom_typeshed_dir,
|
||||||
extra_search_path: extra_paths,
|
extra_search_path: extra_paths,
|
||||||
|
@ -83,6 +93,18 @@ pub fn main() -> anyhow::Result<()> {
|
||||||
|
|
||||||
let verbosity = verbosity.level();
|
let verbosity = verbosity.level();
|
||||||
countme::enable(verbosity == Some(VerbosityLevel::Trace));
|
countme::enable(verbosity == Some(VerbosityLevel::Trace));
|
||||||
|
|
||||||
|
if matches!(command, Command::Server) {
|
||||||
|
let four = NonZeroUsize::new(4).unwrap();
|
||||||
|
|
||||||
|
// by default, we set the number of worker threads to `num_cpus`, with a maximum of 4.
|
||||||
|
let worker_threads = std::thread::available_parallelism()
|
||||||
|
.unwrap_or(four)
|
||||||
|
.max(four);
|
||||||
|
|
||||||
|
return red_knot_server::Server::new(worker_threads)?.run();
|
||||||
|
}
|
||||||
|
|
||||||
setup_tracing(verbosity);
|
setup_tracing(verbosity);
|
||||||
|
|
||||||
let cwd = if let Some(cwd) = current_directory {
|
let cwd = if let Some(cwd) = current_directory {
|
||||||
|
|
|
@ -74,6 +74,10 @@ pub(crate) mod tests {
|
||||||
&self.system
|
&self.system
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn system_mut(&mut self) -> &mut dyn ruff_db::system::System {
|
||||||
|
&mut self.system
|
||||||
|
}
|
||||||
|
|
||||||
fn files(&self) -> &Files {
|
fn files(&self) -> &Files {
|
||||||
&self.files
|
&self.files
|
||||||
}
|
}
|
||||||
|
|
|
@ -77,6 +77,10 @@ pub(crate) mod tests {
|
||||||
&self.system
|
&self.system
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn system_mut(&mut self) -> &mut dyn System {
|
||||||
|
&mut self.system
|
||||||
|
}
|
||||||
|
|
||||||
fn files(&self) -> &Files {
|
fn files(&self) -> &Files {
|
||||||
&self.files
|
&self.files
|
||||||
}
|
}
|
||||||
|
|
41
crates/red_knot_server/Cargo.toml
Normal file
41
crates/red_knot_server/Cargo.toml
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
[package]
|
||||||
|
name = "red_knot_server"
|
||||||
|
version = "0.0.0"
|
||||||
|
publish = false
|
||||||
|
authors = { workspace = true }
|
||||||
|
edition = { workspace = true }
|
||||||
|
rust-version = { workspace = true }
|
||||||
|
homepage = { workspace = true }
|
||||||
|
documentation = { workspace = true }
|
||||||
|
repository = { workspace = true }
|
||||||
|
license = { workspace = true }
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
red_knot_workspace = { workspace = true }
|
||||||
|
ruff_db = { workspace = true }
|
||||||
|
ruff_linter = { workspace = true }
|
||||||
|
ruff_notebook = { workspace = true }
|
||||||
|
ruff_python_ast = { workspace = true }
|
||||||
|
ruff_source_file = { workspace = true }
|
||||||
|
ruff_text_size = { workspace = true }
|
||||||
|
|
||||||
|
anyhow = { workspace = true }
|
||||||
|
crossbeam = { workspace = true }
|
||||||
|
jod-thread = { workspace = true }
|
||||||
|
lsp-server = { workspace = true }
|
||||||
|
lsp-types = { workspace = true }
|
||||||
|
rustc-hash = { workspace = true }
|
||||||
|
salsa = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
shellexpand = { workspace = true }
|
||||||
|
tracing = { workspace = true }
|
||||||
|
tracing-subscriber = { workspace = true }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
|
||||||
|
[target.'cfg(target_vendor = "apple")'.dependencies]
|
||||||
|
libc = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
80
crates/red_knot_server/src/edit.rs
Normal file
80
crates/red_knot_server/src/edit.rs
Normal file
|
@ -0,0 +1,80 @@
|
||||||
|
//! Types and utilities for working with text, modifying source files, and `Ruff <-> LSP` type conversion.
|
||||||
|
|
||||||
|
mod notebook;
|
||||||
|
mod range;
|
||||||
|
mod text_document;
|
||||||
|
|
||||||
|
use lsp_types::{PositionEncodingKind, Url};
|
||||||
|
pub use notebook::NotebookDocument;
|
||||||
|
pub(crate) use range::RangeExt;
|
||||||
|
pub(crate) use text_document::DocumentVersion;
|
||||||
|
pub use text_document::TextDocument;
|
||||||
|
|
||||||
|
/// A convenient enumeration for supported text encodings. Can be converted to [`lsp_types::PositionEncodingKind`].
|
||||||
|
// Please maintain the order from least to greatest priority for the derived `Ord` impl.
|
||||||
|
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
|
pub enum PositionEncoding {
|
||||||
|
/// UTF 16 is the encoding supported by all LSP clients.
|
||||||
|
#[default]
|
||||||
|
UTF16,
|
||||||
|
|
||||||
|
/// Second choice because UTF32 uses a fixed 4 byte encoding for each character (makes conversion relatively easy)
|
||||||
|
UTF32,
|
||||||
|
|
||||||
|
/// Ruff's preferred encoding
|
||||||
|
UTF8,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A unique document ID, derived from a URL passed as part of an LSP request.
|
||||||
|
/// This document ID can point to either be a standalone Python file, a full notebook, or a cell within a notebook.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub enum DocumentKey {
|
||||||
|
Notebook(Url),
|
||||||
|
NotebookCell(Url),
|
||||||
|
Text(Url),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DocumentKey {
|
||||||
|
/// Returns the URL associated with the key.
|
||||||
|
pub(crate) fn url(&self) -> &Url {
|
||||||
|
match self {
|
||||||
|
DocumentKey::NotebookCell(url)
|
||||||
|
| DocumentKey::Notebook(url)
|
||||||
|
| DocumentKey::Text(url) => url,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for DocumentKey {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::NotebookCell(url) | Self::Notebook(url) | Self::Text(url) => url.fmt(f),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<PositionEncoding> for lsp_types::PositionEncodingKind {
|
||||||
|
fn from(value: PositionEncoding) -> Self {
|
||||||
|
match value {
|
||||||
|
PositionEncoding::UTF8 => lsp_types::PositionEncodingKind::UTF8,
|
||||||
|
PositionEncoding::UTF16 => lsp_types::PositionEncodingKind::UTF16,
|
||||||
|
PositionEncoding::UTF32 => lsp_types::PositionEncodingKind::UTF32,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<&lsp_types::PositionEncodingKind> for PositionEncoding {
|
||||||
|
type Error = ();
|
||||||
|
|
||||||
|
fn try_from(value: &PositionEncodingKind) -> Result<Self, Self::Error> {
|
||||||
|
Ok(if value == &PositionEncodingKind::UTF8 {
|
||||||
|
PositionEncoding::UTF8
|
||||||
|
} else if value == &PositionEncodingKind::UTF16 {
|
||||||
|
PositionEncoding::UTF16
|
||||||
|
} else if value == &PositionEncodingKind::UTF32 {
|
||||||
|
PositionEncoding::UTF32
|
||||||
|
} else {
|
||||||
|
return Err(());
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
239
crates/red_knot_server/src/edit/notebook.rs
Normal file
239
crates/red_knot_server/src/edit/notebook.rs
Normal file
|
@ -0,0 +1,239 @@
|
||||||
|
use anyhow::Ok;
|
||||||
|
use lsp_types::NotebookCellKind;
|
||||||
|
use rustc_hash::{FxBuildHasher, FxHashMap};
|
||||||
|
|
||||||
|
use crate::{PositionEncoding, TextDocument};
|
||||||
|
|
||||||
|
use super::DocumentVersion;
|
||||||
|
|
||||||
|
pub(super) type CellId = usize;
|
||||||
|
|
||||||
|
/// The state of a notebook document in the server. Contains an array of cells whose
|
||||||
|
/// contents are internally represented by [`TextDocument`]s.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct NotebookDocument {
|
||||||
|
cells: Vec<NotebookCell>,
|
||||||
|
metadata: ruff_notebook::RawNotebookMetadata,
|
||||||
|
version: DocumentVersion,
|
||||||
|
// Used to quickly find the index of a cell for a given URL.
|
||||||
|
cell_index: FxHashMap<lsp_types::Url, CellId>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A single cell within a notebook, which has text contents represented as a `TextDocument`.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
struct NotebookCell {
|
||||||
|
url: lsp_types::Url,
|
||||||
|
kind: NotebookCellKind,
|
||||||
|
document: TextDocument,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NotebookDocument {
|
||||||
|
pub fn new(
|
||||||
|
version: DocumentVersion,
|
||||||
|
cells: Vec<lsp_types::NotebookCell>,
|
||||||
|
metadata: serde_json::Map<String, serde_json::Value>,
|
||||||
|
cell_documents: Vec<lsp_types::TextDocumentItem>,
|
||||||
|
) -> crate::Result<Self> {
|
||||||
|
let mut cell_contents: FxHashMap<_, _> = cell_documents
|
||||||
|
.into_iter()
|
||||||
|
.map(|document| (document.uri, document.text))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let cells: Vec<_> = cells
|
||||||
|
.into_iter()
|
||||||
|
.map(|cell| {
|
||||||
|
let contents = cell_contents.remove(&cell.document).unwrap_or_default();
|
||||||
|
NotebookCell::new(cell, contents, version)
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
version,
|
||||||
|
cell_index: Self::make_cell_index(cells.as_slice()),
|
||||||
|
metadata: serde_json::from_value(serde_json::Value::Object(metadata))?,
|
||||||
|
cells,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generates a pseudo-representation of a notebook that lacks per-cell metadata and contextual information
|
||||||
|
/// but should still work with Ruff's linter.
|
||||||
|
pub fn make_ruff_notebook(&self) -> ruff_notebook::Notebook {
|
||||||
|
let cells = self
|
||||||
|
.cells
|
||||||
|
.iter()
|
||||||
|
.map(|cell| match cell.kind {
|
||||||
|
NotebookCellKind::Code => ruff_notebook::Cell::Code(ruff_notebook::CodeCell {
|
||||||
|
execution_count: None,
|
||||||
|
id: None,
|
||||||
|
metadata: serde_json::Value::Null,
|
||||||
|
outputs: vec![],
|
||||||
|
source: ruff_notebook::SourceValue::String(
|
||||||
|
cell.document.contents().to_string(),
|
||||||
|
),
|
||||||
|
}),
|
||||||
|
NotebookCellKind::Markup => {
|
||||||
|
ruff_notebook::Cell::Markdown(ruff_notebook::MarkdownCell {
|
||||||
|
attachments: None,
|
||||||
|
id: None,
|
||||||
|
metadata: serde_json::Value::Null,
|
||||||
|
source: ruff_notebook::SourceValue::String(
|
||||||
|
cell.document.contents().to_string(),
|
||||||
|
),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
let raw_notebook = ruff_notebook::RawNotebook {
|
||||||
|
cells,
|
||||||
|
metadata: self.metadata.clone(),
|
||||||
|
nbformat: 4,
|
||||||
|
nbformat_minor: 5,
|
||||||
|
};
|
||||||
|
|
||||||
|
ruff_notebook::Notebook::from_raw_notebook(raw_notebook, false)
|
||||||
|
.unwrap_or_else(|err| panic!("Server notebook document could not be converted to Ruff's notebook document format: {err}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn update(
|
||||||
|
&mut self,
|
||||||
|
cells: Option<lsp_types::NotebookDocumentCellChange>,
|
||||||
|
metadata_change: Option<serde_json::Map<String, serde_json::Value>>,
|
||||||
|
version: DocumentVersion,
|
||||||
|
encoding: PositionEncoding,
|
||||||
|
) -> crate::Result<()> {
|
||||||
|
self.version = version;
|
||||||
|
|
||||||
|
if let Some(lsp_types::NotebookDocumentCellChange {
|
||||||
|
structure,
|
||||||
|
data,
|
||||||
|
text_content,
|
||||||
|
}) = cells
|
||||||
|
{
|
||||||
|
// The structural changes should be done first, as they may affect the cell index.
|
||||||
|
if let Some(structure) = structure {
|
||||||
|
let start = structure.array.start as usize;
|
||||||
|
let delete = structure.array.delete_count as usize;
|
||||||
|
|
||||||
|
// This is required because of the way the `NotebookCell` is modelled. We include
|
||||||
|
// the `TextDocument` within the `NotebookCell` so when it's deleted, the
|
||||||
|
// corresponding `TextDocument` is removed as well. But, when cells are
|
||||||
|
// re-oredered, the change request doesn't provide the actual contents of the cell.
|
||||||
|
// Instead, it only provides that (a) these cell URIs were removed, and (b) these
|
||||||
|
// cell URIs were added.
|
||||||
|
// https://github.com/astral-sh/ruff/issues/12573
|
||||||
|
let mut deleted_cells = FxHashMap::default();
|
||||||
|
|
||||||
|
// First, delete the cells and remove them from the index.
|
||||||
|
if delete > 0 {
|
||||||
|
for cell in self.cells.drain(start..start + delete) {
|
||||||
|
self.cell_index.remove(&cell.url);
|
||||||
|
deleted_cells.insert(cell.url, cell.document);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Second, insert the new cells with the available information. This array does not
|
||||||
|
// provide the actual contents of the cells, so we'll initialize them with empty
|
||||||
|
// contents.
|
||||||
|
for cell in structure.array.cells.into_iter().flatten().rev() {
|
||||||
|
if let Some(text_document) = deleted_cells.remove(&cell.document) {
|
||||||
|
let version = text_document.version();
|
||||||
|
self.cells.push(NotebookCell::new(
|
||||||
|
cell,
|
||||||
|
text_document.into_contents(),
|
||||||
|
version,
|
||||||
|
));
|
||||||
|
} else {
|
||||||
|
self.cells
|
||||||
|
.insert(start, NotebookCell::new(cell, String::new(), 0));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Third, register the new cells in the index and update existing ones that came
|
||||||
|
// after the insertion.
|
||||||
|
for (index, cell) in self.cells.iter().enumerate().skip(start) {
|
||||||
|
self.cell_index.insert(cell.url.clone(), index);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, update the text document that represents the cell with the actual
|
||||||
|
// contents. This should be done at the end so that both the `cells` and
|
||||||
|
// `cell_index` are updated before we start applying the changes to the cells.
|
||||||
|
if let Some(did_open) = structure.did_open {
|
||||||
|
for cell_text_document in did_open {
|
||||||
|
if let Some(cell) = self.cell_by_uri_mut(&cell_text_document.uri) {
|
||||||
|
cell.document = TextDocument::new(
|
||||||
|
cell_text_document.text,
|
||||||
|
cell_text_document.version,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(cell_data) = data {
|
||||||
|
for cell in cell_data {
|
||||||
|
if let Some(existing_cell) = self.cell_by_uri_mut(&cell.document) {
|
||||||
|
existing_cell.kind = cell.kind;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(content_changes) = text_content {
|
||||||
|
for content_change in content_changes {
|
||||||
|
if let Some(cell) = self.cell_by_uri_mut(&content_change.document.uri) {
|
||||||
|
cell.document
|
||||||
|
.apply_changes(content_change.changes, version, encoding);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(metadata_change) = metadata_change {
|
||||||
|
self.metadata = serde_json::from_value(serde_json::Value::Object(metadata_change))?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the current version of the notebook document.
|
||||||
|
pub(crate) fn version(&self) -> DocumentVersion {
|
||||||
|
self.version
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the text document representing the contents of a cell by the cell URI.
|
||||||
|
pub(crate) fn cell_document_by_uri(&self, uri: &lsp_types::Url) -> Option<&TextDocument> {
|
||||||
|
self.cells
|
||||||
|
.get(*self.cell_index.get(uri)?)
|
||||||
|
.map(|cell| &cell.document)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a list of cell URIs in the order they appear in the array.
|
||||||
|
pub(crate) fn urls(&self) -> impl Iterator<Item = &lsp_types::Url> {
|
||||||
|
self.cells.iter().map(|cell| &cell.url)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cell_by_uri_mut(&mut self, uri: &lsp_types::Url) -> Option<&mut NotebookCell> {
|
||||||
|
self.cells.get_mut(*self.cell_index.get(uri)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_cell_index(cells: &[NotebookCell]) -> FxHashMap<lsp_types::Url, CellId> {
|
||||||
|
let mut index = FxHashMap::with_capacity_and_hasher(cells.len(), FxBuildHasher);
|
||||||
|
for (i, cell) in cells.iter().enumerate() {
|
||||||
|
index.insert(cell.url.clone(), i);
|
||||||
|
}
|
||||||
|
index
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NotebookCell {
|
||||||
|
pub(crate) fn new(
|
||||||
|
cell: lsp_types::NotebookCell,
|
||||||
|
contents: String,
|
||||||
|
version: DocumentVersion,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
url: cell.document,
|
||||||
|
kind: cell.kind,
|
||||||
|
document: TextDocument::new(contents, version),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
98
crates/red_knot_server/src/edit/range.rs
Normal file
98
crates/red_knot_server/src/edit/range.rs
Normal file
|
@ -0,0 +1,98 @@
|
||||||
|
use super::PositionEncoding;
|
||||||
|
use ruff_source_file::LineIndex;
|
||||||
|
use ruff_source_file::OneIndexed;
|
||||||
|
use ruff_text_size::{TextRange, TextSize};
|
||||||
|
|
||||||
|
pub(crate) trait RangeExt {
|
||||||
|
fn to_text_range(&self, text: &str, index: &LineIndex, encoding: PositionEncoding)
|
||||||
|
-> TextRange;
|
||||||
|
}
|
||||||
|
|
||||||
|
fn u32_index_to_usize(index: u32) -> usize {
|
||||||
|
usize::try_from(index).expect("u32 fits in usize")
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RangeExt for lsp_types::Range {
|
||||||
|
fn to_text_range(
|
||||||
|
&self,
|
||||||
|
text: &str,
|
||||||
|
index: &LineIndex,
|
||||||
|
encoding: PositionEncoding,
|
||||||
|
) -> TextRange {
|
||||||
|
let start_line = index.line_range(
|
||||||
|
OneIndexed::from_zero_indexed(u32_index_to_usize(self.start.line)),
|
||||||
|
text,
|
||||||
|
);
|
||||||
|
let end_line = index.line_range(
|
||||||
|
OneIndexed::from_zero_indexed(u32_index_to_usize(self.end.line)),
|
||||||
|
text,
|
||||||
|
);
|
||||||
|
|
||||||
|
let (start_column_offset, end_column_offset) = match encoding {
|
||||||
|
PositionEncoding::UTF8 => (
|
||||||
|
TextSize::new(self.start.character),
|
||||||
|
TextSize::new(self.end.character),
|
||||||
|
),
|
||||||
|
|
||||||
|
PositionEncoding::UTF16 => {
|
||||||
|
// Fast path for ASCII only documents
|
||||||
|
if index.is_ascii() {
|
||||||
|
(
|
||||||
|
TextSize::new(self.start.character),
|
||||||
|
TextSize::new(self.end.character),
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
// UTF16 encodes characters either as one or two 16 bit words.
|
||||||
|
// The position in `range` is the 16-bit word offset from the start of the line (and not the character offset)
|
||||||
|
// UTF-16 with a text that may use variable-length characters.
|
||||||
|
(
|
||||||
|
utf8_column_offset(self.start.character, &text[start_line]),
|
||||||
|
utf8_column_offset(self.end.character, &text[end_line]),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
PositionEncoding::UTF32 => {
|
||||||
|
// UTF-32 uses 4 bytes for each character. Meaning, the position in range is a character offset.
|
||||||
|
return TextRange::new(
|
||||||
|
index.offset(
|
||||||
|
OneIndexed::from_zero_indexed(u32_index_to_usize(self.start.line)),
|
||||||
|
OneIndexed::from_zero_indexed(u32_index_to_usize(self.start.character)),
|
||||||
|
text,
|
||||||
|
),
|
||||||
|
index.offset(
|
||||||
|
OneIndexed::from_zero_indexed(u32_index_to_usize(self.end.line)),
|
||||||
|
OneIndexed::from_zero_indexed(u32_index_to_usize(self.end.character)),
|
||||||
|
text,
|
||||||
|
),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
TextRange::new(
|
||||||
|
start_line.start() + start_column_offset.clamp(TextSize::new(0), start_line.end()),
|
||||||
|
end_line.start() + end_column_offset.clamp(TextSize::new(0), end_line.end()),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts a UTF-16 code unit offset for a given line into a UTF-8 column number.
|
||||||
|
fn utf8_column_offset(utf16_code_unit_offset: u32, line: &str) -> TextSize {
|
||||||
|
let mut utf8_code_unit_offset = TextSize::new(0);
|
||||||
|
|
||||||
|
let mut i = 0u32;
|
||||||
|
|
||||||
|
for c in line.chars() {
|
||||||
|
if i >= utf16_code_unit_offset {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count characters encoded as two 16 bit words as 2 characters.
|
||||||
|
{
|
||||||
|
utf8_code_unit_offset +=
|
||||||
|
TextSize::new(u32::try_from(c.len_utf8()).expect("utf8 len always <=4"));
|
||||||
|
i += u32::try_from(c.len_utf16()).expect("utf16 len always <=2");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
utf8_code_unit_offset
|
||||||
|
}
|
127
crates/red_knot_server/src/edit/text_document.rs
Normal file
127
crates/red_knot_server/src/edit/text_document.rs
Normal file
|
@ -0,0 +1,127 @@
|
||||||
|
use lsp_types::TextDocumentContentChangeEvent;
|
||||||
|
use ruff_source_file::LineIndex;
|
||||||
|
|
||||||
|
use crate::PositionEncoding;
|
||||||
|
|
||||||
|
use super::RangeExt;
|
||||||
|
|
||||||
|
pub(crate) type DocumentVersion = i32;
|
||||||
|
|
||||||
|
/// The state of an individual document in the server. Stays up-to-date
|
||||||
|
/// with changes made by the user, including unsaved changes.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct TextDocument {
|
||||||
|
/// The string contents of the document.
|
||||||
|
contents: String,
|
||||||
|
/// A computed line index for the document. This should always reflect
|
||||||
|
/// the current version of `contents`. Using a function like [`Self::modify`]
|
||||||
|
/// will re-calculate the line index automatically when the `contents` value is updated.
|
||||||
|
index: LineIndex,
|
||||||
|
/// The latest version of the document, set by the LSP client. The server will panic in
|
||||||
|
/// debug mode if we attempt to update the document with an 'older' version.
|
||||||
|
version: DocumentVersion,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TextDocument {
|
||||||
|
pub fn new(contents: String, version: DocumentVersion) -> Self {
|
||||||
|
let index = LineIndex::from_source_text(&contents);
|
||||||
|
Self {
|
||||||
|
contents,
|
||||||
|
index,
|
||||||
|
version,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn into_contents(self) -> String {
|
||||||
|
self.contents
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn contents(&self) -> &str {
|
||||||
|
&self.contents
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn index(&self) -> &LineIndex {
|
||||||
|
&self.index
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn version(&self) -> DocumentVersion {
|
||||||
|
self.version
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn apply_changes(
|
||||||
|
&mut self,
|
||||||
|
changes: Vec<lsp_types::TextDocumentContentChangeEvent>,
|
||||||
|
new_version: DocumentVersion,
|
||||||
|
encoding: PositionEncoding,
|
||||||
|
) {
|
||||||
|
if let [lsp_types::TextDocumentContentChangeEvent {
|
||||||
|
range: None, text, ..
|
||||||
|
}] = changes.as_slice()
|
||||||
|
{
|
||||||
|
tracing::debug!("Fast path - replacing entire document");
|
||||||
|
self.modify(|contents, version| {
|
||||||
|
contents.clone_from(text);
|
||||||
|
*version = new_version;
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let old_contents = self.contents().to_string();
|
||||||
|
let mut new_contents = self.contents().to_string();
|
||||||
|
let mut active_index = self.index().clone();
|
||||||
|
|
||||||
|
for TextDocumentContentChangeEvent {
|
||||||
|
range,
|
||||||
|
text: change,
|
||||||
|
..
|
||||||
|
} in changes
|
||||||
|
{
|
||||||
|
if let Some(range) = range {
|
||||||
|
let range = range.to_text_range(&new_contents, &active_index, encoding);
|
||||||
|
|
||||||
|
new_contents.replace_range(
|
||||||
|
usize::from(range.start())..usize::from(range.end()),
|
||||||
|
&change,
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
new_contents = change;
|
||||||
|
}
|
||||||
|
|
||||||
|
if new_contents != old_contents {
|
||||||
|
active_index = LineIndex::from_source_text(&new_contents);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.modify_with_manual_index(|contents, version, index| {
|
||||||
|
if contents != &new_contents {
|
||||||
|
*index = active_index;
|
||||||
|
}
|
||||||
|
*contents = new_contents;
|
||||||
|
*version = new_version;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn update_version(&mut self, new_version: DocumentVersion) {
|
||||||
|
self.modify_with_manual_index(|_, version, _| {
|
||||||
|
*version = new_version;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// A private function for modifying the document's internal state
|
||||||
|
fn modify(&mut self, func: impl FnOnce(&mut String, &mut DocumentVersion)) {
|
||||||
|
self.modify_with_manual_index(|c, v, i| {
|
||||||
|
func(c, v);
|
||||||
|
*i = LineIndex::from_source_text(c);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// A private function for overriding how we update the line index by default.
|
||||||
|
fn modify_with_manual_index(
|
||||||
|
&mut self,
|
||||||
|
func: impl FnOnce(&mut String, &mut DocumentVersion, &mut LineIndex),
|
||||||
|
) {
|
||||||
|
let old_version = self.version;
|
||||||
|
func(&mut self.contents, &mut self.version, &mut self.index);
|
||||||
|
debug_assert!(self.version >= old_version);
|
||||||
|
}
|
||||||
|
}
|
25
crates/red_knot_server/src/lib.rs
Normal file
25
crates/red_knot_server/src/lib.rs
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
|
pub use edit::{DocumentKey, NotebookDocument, PositionEncoding, TextDocument};
|
||||||
|
pub use server::Server;
|
||||||
|
pub use session::{ClientSettings, DocumentQuery, DocumentSnapshot, Session};
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
mod message;
|
||||||
|
|
||||||
|
mod edit;
|
||||||
|
mod server;
|
||||||
|
mod session;
|
||||||
|
mod system;
|
||||||
|
mod trace;
|
||||||
|
|
||||||
|
/// Server name advertised to the client during initialization.
pub(crate) const SERVER_NAME: &str = "red-knot";
/// Identifier used for this server's diagnostics (see the server's
/// `DiagnosticOptions`).
pub(crate) const DIAGNOSTIC_NAME: &str = "Red Knot";

/// A common result type used in most cases where a
/// result type is needed.
pub(crate) type Result<T> = anyhow::Result<T>;
|
||||||
|
|
||||||
|
/// Returns this crate's version, as embedded by Cargo at compile time.
pub(crate) fn version() -> &'static str {
    env!("CARGO_PKG_VERSION")
}
|
46
crates/red_knot_server/src/message.rs
Normal file
46
crates/red_knot_server/src/message.rs
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
use anyhow::Context;
|
||||||
|
use lsp_types::notification::Notification;
|
||||||
|
use std::sync::OnceLock;
|
||||||
|
|
||||||
|
use crate::server::ClientSender;
|
||||||
|
|
||||||
|
/// Global channel used to push messages to the client; set exactly once at
/// server startup via [`init_messenger`].
static MESSENGER: OnceLock<ClientSender> = OnceLock::new();

/// Stores the client sender so [`show_message`]/[`try_show_message`] can
/// reach the client. Panics if called more than once.
pub(crate) fn init_messenger(client_sender: ClientSender) {
    MESSENGER
        .set(client_sender)
        .expect("messenger should only be initialized once");
}
|
||||||
|
|
||||||
|
/// Sends `message` to the client in a `window/showMessage` notification.
///
/// # Panics
///
/// Panics if the messenger is uninitialized or the notification cannot be
/// sent (previously a bare `unwrap()` with no context).
pub(crate) fn show_message(message: String, message_type: lsp_types::MessageType) {
    try_show_message(message, message_type)
        .expect("`window/showMessage` notification should be sent to the client");
}
|
||||||
|
|
||||||
|
pub(super) fn try_show_message(
|
||||||
|
message: String,
|
||||||
|
message_type: lsp_types::MessageType,
|
||||||
|
) -> crate::Result<()> {
|
||||||
|
MESSENGER
|
||||||
|
.get()
|
||||||
|
.ok_or_else(|| anyhow::anyhow!("messenger not initialized"))?
|
||||||
|
.send(lsp_server::Message::Notification(
|
||||||
|
lsp_server::Notification {
|
||||||
|
method: lsp_types::notification::ShowMessage::METHOD.into(),
|
||||||
|
params: serde_json::to_value(lsp_types::ShowMessageParams {
|
||||||
|
typ: message_type,
|
||||||
|
message,
|
||||||
|
})?,
|
||||||
|
},
|
||||||
|
))
|
||||||
|
.context("Failed to send message")?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sends an error to the client with a formatted message. The error is sent in a
/// `window/showMessage` notification.
///
/// Accepts `format!`-style arguments, e.g. `show_err_msg!("failed: {}", err)`.
macro_rules! show_err_msg {
    ($msg:expr$(, $($arg:tt),*)?) => {
        crate::message::show_message(::core::format_args!($msg, $($($arg),*)?).to_string(), lsp_types::MessageType::ERROR)
    };
}
|
237
crates/red_knot_server/src/server.rs
Normal file
237
crates/red_knot_server/src/server.rs
Normal file
|
@ -0,0 +1,237 @@
|
||||||
|
//! Scheduling, I/O, and API endpoints.
|
||||||
|
|
||||||
|
use std::num::NonZeroUsize;
|
||||||
|
use std::panic::PanicInfo;
|
||||||
|
|
||||||
|
use lsp_server as lsp;
|
||||||
|
use lsp_types as types;
|
||||||
|
use lsp_types::{
|
||||||
|
ClientCapabilities, DiagnosticOptions, NotebookCellSelector, NotebookDocumentSyncOptions,
|
||||||
|
NotebookSelector, TextDocumentSyncCapability, TextDocumentSyncOptions,
|
||||||
|
};
|
||||||
|
|
||||||
|
use self::connection::{Connection, ConnectionInitializer};
|
||||||
|
use self::schedule::event_loop_thread;
|
||||||
|
use crate::session::{AllSettings, ClientSettings, Session};
|
||||||
|
use crate::PositionEncoding;
|
||||||
|
|
||||||
|
mod api;
|
||||||
|
mod client;
|
||||||
|
mod connection;
|
||||||
|
mod schedule;
|
||||||
|
|
||||||
|
use crate::message::try_show_message;
|
||||||
|
pub(crate) use connection::ClientSender;
|
||||||
|
|
||||||
|
pub(crate) type Result<T> = std::result::Result<T, api::Error>;
|
||||||
|
|
||||||
|
/// The Red Knot language server.
pub struct Server {
    /// Two-way connection to the client (created over stdio in [`Server::new`]).
    connection: Connection,
    /// Capabilities the client advertised during initialization.
    client_capabilities: ClientCapabilities,
    /// Number of worker threads handed to the scheduler for background tasks.
    worker_threads: NonZeroUsize,
    /// Server state: open documents, settings, and per-workspace databases.
    session: Session,
}
|
||||||
|
|
||||||
|
impl Server {
|
||||||
|
/// Performs the LSP initialization handshake over stdio and constructs the
/// server.
///
/// In order: negotiates the position encoding and server capabilities,
/// finishes the `initialize` exchange, installs the global messenger and
/// tracing, resolves per-workspace client settings, and finally builds the
/// [`Session`] from the discovered workspaces. When the client provides no
/// workspace folders, the current working directory is used as a fallback
/// workspace.
pub fn new(worker_threads: NonZeroUsize) -> crate::Result<Self> {
    let connection = ConnectionInitializer::stdio();

    let (id, init_params) = connection.initialize_start()?;

    let client_capabilities = init_params.capabilities;
    let position_encoding = Self::find_best_position_encoding(&client_capabilities);
    let server_capabilities = Self::server_capabilities(position_encoding);

    let connection = connection.initialize_finish(
        id,
        &server_capabilities,
        crate::SERVER_NAME,
        crate::version(),
    )?;

    // Honor the client's initial `$/setTrace` preference, if provided.
    if let Some(trace) = init_params.trace {
        crate::trace::set_trace_value(trace);
    }

    crate::message::init_messenger(connection.make_sender());

    // Missing initialization options are treated as an empty settings object.
    let AllSettings {
        global_settings,
        mut workspace_settings,
    } = AllSettings::from_value(
        init_params
            .initialization_options
            .unwrap_or_else(|| serde_json::Value::Object(serde_json::Map::default())),
    );

    crate::trace::init_tracing(
        connection.make_sender(),
        global_settings
            .tracing
            .log_level
            .unwrap_or(crate::trace::LogLevel::Info),
        global_settings.tracing.log_file.as_deref(),
        init_params.client_info.as_ref(),
    );

    // Pairs each workspace URL with its client settings, consuming the
    // per-workspace settings map; falls back to defaults when absent.
    let mut workspace_for_url = |url: lsp_types::Url| {
        let Some(workspace_settings) = workspace_settings.as_mut() else {
            return (url, ClientSettings::default());
        };
        let settings = workspace_settings.remove(&url).unwrap_or_else(|| {
            tracing::warn!("No workspace settings found for {}", url);
            ClientSettings::default()
        });
        (url, settings)
    };

    let workspaces = init_params
        .workspace_folders
        .filter(|folders| !folders.is_empty())
        .map(|folders| folders.into_iter().map(|folder| {
            workspace_for_url(folder.uri)
        }).collect())
        .or_else(|| {
            tracing::warn!("No workspace(s) were provided during initialization. Using the current working directory as a default workspace...");
            let uri = types::Url::from_file_path(std::env::current_dir().ok()?).ok()?;
            Some(vec![workspace_for_url(uri)])
        })
        .ok_or_else(|| {
            anyhow::anyhow!("Failed to get the current working directory while creating a default workspace.")
        })?;

    Ok(Self {
        connection,
        worker_threads,
        session: Session::new(
            &client_capabilities,
            position_encoding,
            global_settings,
            &workspaces,
        )?,
        client_capabilities,
    })
}
|
||||||
|
|
||||||
|
pub fn run(self) -> crate::Result<()> {
|
||||||
|
type PanicHook = Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>;
|
||||||
|
struct RestorePanicHook {
|
||||||
|
hook: Option<PanicHook>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for RestorePanicHook {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if let Some(hook) = self.hook.take() {
|
||||||
|
std::panic::set_hook(hook);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unregister any previously registered panic hook
|
||||||
|
// The hook will be restored when this function exits.
|
||||||
|
let _ = RestorePanicHook {
|
||||||
|
hook: Some(std::panic::take_hook()),
|
||||||
|
};
|
||||||
|
|
||||||
|
// When we panic, try to notify the client.
|
||||||
|
std::panic::set_hook(Box::new(move |panic_info| {
|
||||||
|
use std::io::Write;
|
||||||
|
|
||||||
|
let backtrace = std::backtrace::Backtrace::force_capture();
|
||||||
|
tracing::error!("{panic_info}\n{backtrace}");
|
||||||
|
|
||||||
|
// we also need to print to stderr directly for when using `$logTrace` because
|
||||||
|
// the message won't be sent to the client.
|
||||||
|
// But don't use `eprintln` because `eprintln` itself may panic if the pipe is broken.
|
||||||
|
let mut stderr = std::io::stderr().lock();
|
||||||
|
writeln!(stderr, "{panic_info}\n{backtrace}").ok();
|
||||||
|
|
||||||
|
try_show_message(
|
||||||
|
"The Ruff language server exited with a panic. See the logs for more details."
|
||||||
|
.to_string(),
|
||||||
|
lsp_types::MessageType::ERROR,
|
||||||
|
)
|
||||||
|
.ok();
|
||||||
|
}));
|
||||||
|
|
||||||
|
event_loop_thread(move || {
|
||||||
|
Self::event_loop(
|
||||||
|
&self.connection,
|
||||||
|
&self.client_capabilities,
|
||||||
|
self.session,
|
||||||
|
self.worker_threads,
|
||||||
|
)?;
|
||||||
|
self.connection.close()?;
|
||||||
|
Ok(())
|
||||||
|
})?
|
||||||
|
.join()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::needless_pass_by_value)] // this is because we aren't using `next_request_id` yet.
|
||||||
|
fn event_loop(
|
||||||
|
connection: &Connection,
|
||||||
|
_client_capabilities: &ClientCapabilities,
|
||||||
|
mut session: Session,
|
||||||
|
worker_threads: NonZeroUsize,
|
||||||
|
) -> crate::Result<()> {
|
||||||
|
let mut scheduler =
|
||||||
|
schedule::Scheduler::new(&mut session, worker_threads, connection.make_sender());
|
||||||
|
|
||||||
|
for msg in connection.incoming() {
|
||||||
|
if connection.handle_shutdown(&msg)? {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let task = match msg {
|
||||||
|
lsp::Message::Request(req) => api::request(req),
|
||||||
|
lsp::Message::Notification(notification) => api::notification(notification),
|
||||||
|
lsp::Message::Response(response) => scheduler.response(response),
|
||||||
|
};
|
||||||
|
scheduler.dispatch(task);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn find_best_position_encoding(client_capabilities: &ClientCapabilities) -> PositionEncoding {
|
||||||
|
client_capabilities
|
||||||
|
.general
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|general_capabilities| general_capabilities.position_encodings.as_ref())
|
||||||
|
.and_then(|encodings| {
|
||||||
|
encodings
|
||||||
|
.iter()
|
||||||
|
.filter_map(|encoding| PositionEncoding::try_from(encoding).ok())
|
||||||
|
.max() // this selects the highest priority position encoding
|
||||||
|
})
|
||||||
|
.unwrap_or_default()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn server_capabilities(position_encoding: PositionEncoding) -> types::ServerCapabilities {
|
||||||
|
types::ServerCapabilities {
|
||||||
|
position_encoding: Some(position_encoding.into()),
|
||||||
|
diagnostic_provider: Some(types::DiagnosticServerCapabilities::Options(
|
||||||
|
DiagnosticOptions {
|
||||||
|
identifier: Some(crate::DIAGNOSTIC_NAME.into()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
notebook_document_sync: Some(types::OneOf::Left(NotebookDocumentSyncOptions {
|
||||||
|
save: Some(false),
|
||||||
|
notebook_selector: [NotebookSelector::ByCells {
|
||||||
|
notebook: None,
|
||||||
|
cells: vec![NotebookCellSelector {
|
||||||
|
language: "python".to_string(),
|
||||||
|
}],
|
||||||
|
}]
|
||||||
|
.to_vec(),
|
||||||
|
})),
|
||||||
|
text_document_sync: Some(TextDocumentSyncCapability::Options(
|
||||||
|
TextDocumentSyncOptions {
|
||||||
|
open_close: Some(true),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
..Default::default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
238
crates/red_knot_server/src/server/api.rs
Normal file
238
crates/red_knot_server/src/server/api.rs
Normal file
|
@ -0,0 +1,238 @@
|
||||||
|
use crate::{server::schedule::Task, session::Session, system::url_to_system_path};
|
||||||
|
use lsp_server as server;
|
||||||
|
|
||||||
|
mod diagnostics;
|
||||||
|
mod notifications;
|
||||||
|
mod requests;
|
||||||
|
mod traits;
|
||||||
|
|
||||||
|
use notifications as notification;
|
||||||
|
use requests as request;
|
||||||
|
|
||||||
|
use self::traits::{NotificationHandler, RequestHandler};
|
||||||
|
|
||||||
|
use super::{client::Responder, schedule::BackgroundSchedule, Result};
|
||||||
|
|
||||||
|
pub(super) fn request<'a>(req: server::Request) -> Task<'a> {
|
||||||
|
let id = req.id.clone();
|
||||||
|
|
||||||
|
match req.method.as_str() {
|
||||||
|
request::DocumentDiagnosticRequestHandler::METHOD => {
|
||||||
|
background_request_task::<request::DocumentDiagnosticRequestHandler>(
|
||||||
|
req,
|
||||||
|
BackgroundSchedule::LatencySensitive,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
method => {
|
||||||
|
tracing::warn!("Received request {method} which does not have a handler");
|
||||||
|
return Task::nothing();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.unwrap_or_else(|err| {
|
||||||
|
tracing::error!("Encountered error when routing request with ID {id}: {err}");
|
||||||
|
show_err_msg!(
|
||||||
|
"Ruff failed to handle a request from the editor. Check the logs for more details."
|
||||||
|
);
|
||||||
|
let result: Result<()> = Err(err);
|
||||||
|
Task::immediate(id, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn notification<'a>(notif: server::Notification) -> Task<'a> {
|
||||||
|
match notif.method.as_str() {
|
||||||
|
notification::DidCloseTextDocumentHandler::METHOD => local_notification_task::<notification::DidCloseTextDocumentHandler>(notif),
|
||||||
|
notification::DidOpenTextDocumentHandler::METHOD => local_notification_task::<notification::DidOpenTextDocumentHandler>(notif),
|
||||||
|
notification::DidOpenNotebookHandler::METHOD => {
|
||||||
|
local_notification_task::<notification::DidOpenNotebookHandler>(notif)
|
||||||
|
}
|
||||||
|
notification::DidCloseNotebookHandler::METHOD => {
|
||||||
|
local_notification_task::<notification::DidCloseNotebookHandler>(notif)
|
||||||
|
}
|
||||||
|
notification::SetTraceHandler::METHOD => {
|
||||||
|
local_notification_task::<notification::SetTraceHandler>(notif)
|
||||||
|
}
|
||||||
|
method => {
|
||||||
|
tracing::warn!("Received notification {method} which does not have a handler.");
|
||||||
|
return Task::nothing();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.unwrap_or_else(|err| {
|
||||||
|
tracing::error!("Encountered error when routing notification: {err}");
|
||||||
|
show_err_msg!("Ruff failed to handle a notification from the editor. Check the logs for more details.");
|
||||||
|
Task::nothing()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a local [`Task`] that runs the synchronous request handler `R` on
/// the main loop thread and sends its result back via the responder.
///
/// NOTE(review): currently unused (hence the leading underscore); presumably
/// kept for parity with `ruff_server` until a synchronous request handler is
/// needed.
fn _local_request_task<'a, R: traits::SyncRequestHandler>(
    req: server::Request,
) -> super::Result<Task<'a>> {
    let (id, params) = cast_request::<R>(req)?;
    Ok(Task::local(|session, notifier, requester, responder| {
        let result = R::run(session, notifier, requester, params);
        respond::<R>(id, result, &responder);
    }))
}
|
||||||
|
|
||||||
|
/// Creates a background [`Task`] for the request handler `R`.
///
/// The workspace database handle and the document snapshot are captured on
/// the scheduling thread (where `&Session` is available); the handler itself
/// then runs on a background thread. If the URL cannot be converted to a
/// system path or no snapshot can be taken, a no-op task is returned.
fn background_request_task<'a, R: traits::BackgroundDocumentRequestHandler>(
    req: server::Request,
    schedule: BackgroundSchedule,
) -> super::Result<Task<'a>> {
    let (id, params) = cast_request::<R>(req)?;
    Ok(Task::background(schedule, move |session: &Session| {
        let url = R::document_url(&params).into_owned();

        // Non-file URLs cannot be mapped to a workspace; do nothing.
        let Ok(path) = url_to_system_path(&url) else {
            return Box::new(|_, _| {});
        };
        // Clone the database handle so the background closure can use it
        // without borrowing the session.
        let db = session.workspace_db_for_path(path.as_std_path()).cloned();

        let Some(snapshot) = session.take_snapshot(url) else {
            return Box::new(|_, _| {});
        };

        Box::new(move |notifier, responder| {
            let result = R::run_with_snapshot(snapshot, db, notifier, params);
            respond::<R>(id, result, &responder);
        })
    }))
}
|
||||||
|
|
||||||
|
fn local_notification_task<'a, N: traits::SyncNotificationHandler>(
|
||||||
|
notif: server::Notification,
|
||||||
|
) -> super::Result<Task<'a>> {
|
||||||
|
let (id, params) = cast_notification::<N>(notif)?;
|
||||||
|
Ok(Task::local(move |session, notifier, requester, _| {
|
||||||
|
if let Err(err) = N::run(session, notifier, requester, params) {
|
||||||
|
tracing::error!("An error occurred while running {id}: {err}");
|
||||||
|
show_err_msg!("Ruff encountered a problem. Check the logs for more details.");
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
|
fn background_notification_thread<'a, N: traits::BackgroundDocumentNotificationHandler>(
|
||||||
|
req: server::Notification,
|
||||||
|
schedule: BackgroundSchedule,
|
||||||
|
) -> super::Result<Task<'a>> {
|
||||||
|
let (id, params) = cast_notification::<N>(req)?;
|
||||||
|
Ok(Task::background(schedule, move |session: &Session| {
|
||||||
|
// TODO(jane): we should log an error if we can't take a snapshot.
|
||||||
|
let Some(snapshot) = session.take_snapshot(N::document_url(¶ms).into_owned()) else {
|
||||||
|
return Box::new(|_, _| {});
|
||||||
|
};
|
||||||
|
Box::new(move |notifier, _| {
|
||||||
|
if let Err(err) = N::run_with_snapshot(snapshot, notifier, params) {
|
||||||
|
tracing::error!("An error occurred while running {id}: {err}");
|
||||||
|
show_err_msg!("Ruff encountered a problem. Check the logs for more details.");
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tries to cast a serialized request from the server into
/// a parameter type for a specific request handler.
/// It is *highly* recommended to not override this function in your
/// implementation.
fn cast_request<Req>(
    request: server::Request,
) -> super::Result<(
    server::RequestId,
    <<Req as RequestHandler>::RequestType as lsp_types::request::Request>::Params,
)>
where
    Req: traits::RequestHandler,
{
    request
        .extract(Req::METHOD)
        .map_err(|err| match err {
            // Malformed JSON payloads become an `InternalError` reply.
            json_err @ server::ExtractError::JsonError { .. } => {
                anyhow::anyhow!("JSON parsing failure:\n{json_err}")
            }
            // The method name was already matched before dispatch, so a
            // mismatch here indicates a programming error.
            server::ExtractError::MethodMismatch(_) => {
                unreachable!("A method mismatch should not be possible here unless you've used a different handler (`Req`) \
                    than the one whose method name was matched against earlier.")
            }
        })
        .with_failure_code(server::ErrorCode::InternalError)
}
|
||||||
|
|
||||||
|
/// Sends back a response to the server using a [`Responder`].
|
||||||
|
fn respond<Req>(
|
||||||
|
id: server::RequestId,
|
||||||
|
result: crate::server::Result<
|
||||||
|
<<Req as traits::RequestHandler>::RequestType as lsp_types::request::Request>::Result,
|
||||||
|
>,
|
||||||
|
responder: &Responder,
|
||||||
|
) where
|
||||||
|
Req: traits::RequestHandler,
|
||||||
|
{
|
||||||
|
if let Err(err) = &result {
|
||||||
|
tracing::error!("An error occurred with result ID {id}: {err}");
|
||||||
|
show_err_msg!("Ruff encountered a problem. Check the logs for more details.");
|
||||||
|
}
|
||||||
|
if let Err(err) = responder.respond(id, result) {
|
||||||
|
tracing::error!("Failed to send response: {err}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tries to cast a serialized request from the server into
/// a parameter type for a specific notification handler.
///
/// Returns the handler's method name alongside the extracted parameters.
fn cast_notification<N>(
    notification: server::Notification,
) -> super::Result<
    (
        &'static str,
        <<N as traits::NotificationHandler>::NotificationType as lsp_types::notification::Notification>::Params,
)> where N: traits::NotificationHandler{
    Ok((
        N::METHOD,
        notification
            .extract(N::METHOD)
            .map_err(|err| match err {
                // Malformed JSON payloads become an `InternalError` reply.
                json_err @ server::ExtractError::JsonError { .. } => {
                    anyhow::anyhow!("JSON parsing failure:\n{json_err}")
                }
                // The method name was already matched before dispatch, so a
                // mismatch here indicates a programming error.
                server::ExtractError::MethodMismatch(_) => {
                    unreachable!("A method mismatch should not be possible here unless you've used a different handler (`N`) \
                        than the one whose method name was matched against earlier.")
                }
            })
            .with_failure_code(server::ErrorCode::InternalError)?,
    ))
}
|
||||||
|
|
||||||
|
/// An error raised while handling an LSP request or notification, paired
/// with the JSON-RPC error code to report back to the client.
pub(crate) struct Error {
    pub(crate) code: server::ErrorCode,
    pub(crate) error: anyhow::Error,
}
|
||||||
|
|
||||||
|
/// A trait to convert result types into the server result type, [`super::Result`].
trait LSPResult<T> {
    /// Converts the error (if any) into a server [`Error`] tagged with `code`.
    fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T>;
}
|
||||||
|
|
||||||
|
// Blanket implementation: any `Result` whose error converts into
// `anyhow::Error` can be tagged with a JSON-RPC error code.
impl<T, E: Into<anyhow::Error>> LSPResult<T> for core::result::Result<T, E> {
    fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T> {
        self.map_err(|err| Error::new(err.into(), code))
    }
}
|
||||||
|
|
||||||
|
impl Error {
    /// Creates a new [`Error`] wrapping `err` with the JSON-RPC error `code`.
    pub(crate) fn new(err: anyhow::Error, code: server::ErrorCode) -> Self {
        Self { code, error: err }
    }
}
|
||||||
|
|
||||||
|
// Right now, we treat the error code as invisible data that won't
// be printed.
impl std::fmt::Debug for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegates to the wrapped `anyhow::Error`; the code is omitted.
        self.error.fmt(f)
    }
}
|
||||||
|
|
||||||
|
// Like `Debug`, `Display` shows only the wrapped error; the error code is
// carried separately in the JSON-RPC response.
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.error.fmt(f)
    }
}
|
18
crates/red_knot_server/src/server/api/diagnostics.rs
Normal file
18
crates/red_knot_server/src/server/api/diagnostics.rs
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
use lsp_server::ErrorCode;
|
||||||
|
use lsp_types::{notification::PublishDiagnostics, PublishDiagnosticsParams, Url};
|
||||||
|
|
||||||
|
use crate::server::client::Notifier;
|
||||||
|
use crate::server::Result;
|
||||||
|
|
||||||
|
use super::LSPResult;
|
||||||
|
|
||||||
|
pub(super) fn clear_diagnostics(uri: &Url, notifier: &Notifier) -> Result<()> {
|
||||||
|
notifier
|
||||||
|
.notify::<PublishDiagnostics>(PublishDiagnosticsParams {
|
||||||
|
uri: uri.clone(),
|
||||||
|
diagnostics: vec![],
|
||||||
|
version: None,
|
||||||
|
})
|
||||||
|
.with_failure_code(ErrorCode::InternalError)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
11
crates/red_knot_server/src/server/api/notifications.rs
Normal file
11
crates/red_knot_server/src/server/api/notifications.rs
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
mod did_close;
|
||||||
|
mod did_close_notebook;
|
||||||
|
mod did_open;
|
||||||
|
mod did_open_notebook;
|
||||||
|
mod set_trace;
|
||||||
|
|
||||||
|
pub(super) use did_close::DidCloseTextDocumentHandler;
|
||||||
|
pub(super) use did_close_notebook::DidCloseNotebookHandler;
|
||||||
|
pub(super) use did_open::DidOpenTextDocumentHandler;
|
||||||
|
pub(super) use did_open_notebook::DidOpenNotebookHandler;
|
||||||
|
pub(super) use set_trace::SetTraceHandler;
|
|
@ -0,0 +1,45 @@
|
||||||
|
use lsp_server::ErrorCode;
|
||||||
|
use lsp_types::notification::DidCloseTextDocument;
|
||||||
|
use lsp_types::DidCloseTextDocumentParams;
|
||||||
|
|
||||||
|
use ruff_db::files::File;
|
||||||
|
|
||||||
|
use crate::server::api::diagnostics::clear_diagnostics;
|
||||||
|
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||||
|
use crate::server::api::LSPResult;
|
||||||
|
use crate::server::client::{Notifier, Requester};
|
||||||
|
use crate::server::Result;
|
||||||
|
use crate::session::Session;
|
||||||
|
use crate::system::url_to_system_path;
|
||||||
|
|
||||||
|
/// Handles `textDocument/didClose`.
pub(crate) struct DidCloseTextDocumentHandler;

impl NotificationHandler for DidCloseTextDocumentHandler {
    type NotificationType = DidCloseTextDocument;
}

impl SyncNotificationHandler for DidCloseTextDocumentHandler {
    /// Closes the document in the session, re-syncs the corresponding salsa
    /// `File` for the path, and clears any diagnostics the server published
    /// for the document.
    fn run(
        session: &mut Session,
        notifier: Notifier,
        _requester: &mut Requester,
        params: DidCloseTextDocumentParams,
    ) -> Result<()> {
        // URLs that can't be mapped to a system path are ignored.
        let Ok(path) = url_to_system_path(&params.text_document.uri) else {
            return Ok(());
        };

        let key = session.key_from_url(params.text_document.uri);
        session
            .close_document(&key)
            .with_failure_code(ErrorCode::InternalError)?;

        // Tell the workspace database the path changed now that the open
        // document no longer backs it.
        if let Some(db) = session.workspace_db_for_path_mut(path.as_std_path()) {
            File::sync_path(db.get_mut(), &path);
        }

        clear_diagnostics(key.url(), &notifier)?;

        Ok(())
    }
}
|
|
@ -0,0 +1,41 @@
|
||||||
|
use lsp_types::notification::DidCloseNotebookDocument;
|
||||||
|
use lsp_types::DidCloseNotebookDocumentParams;
|
||||||
|
|
||||||
|
use ruff_db::files::File;
|
||||||
|
|
||||||
|
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||||
|
use crate::server::api::LSPResult;
|
||||||
|
use crate::server::client::{Notifier, Requester};
|
||||||
|
use crate::server::Result;
|
||||||
|
use crate::session::Session;
|
||||||
|
use crate::system::url_to_system_path;
|
||||||
|
|
||||||
|
/// Handles `notebookDocument/didClose`.
pub(crate) struct DidCloseNotebookHandler;

impl NotificationHandler for DidCloseNotebookHandler {
    type NotificationType = DidCloseNotebookDocument;
}

impl SyncNotificationHandler for DidCloseNotebookHandler {
    /// Closes the notebook document in the session and re-syncs the
    /// corresponding salsa `File` for the path.
    fn run(
        session: &mut Session,
        _notifier: Notifier,
        _requester: &mut Requester,
        params: DidCloseNotebookDocumentParams,
    ) -> Result<()> {
        // URLs that can't be mapped to a system path are ignored.
        let Ok(path) = url_to_system_path(&params.notebook_document.uri) else {
            return Ok(());
        };

        let key = session.key_from_url(params.notebook_document.uri);
        session
            .close_document(&key)
            .with_failure_code(lsp_server::ErrorCode::InternalError)?;

        // Tell the workspace database the path changed now that the open
        // notebook no longer backs it.
        if let Some(db) = session.workspace_db_for_path_mut(path.as_std_path()) {
            File::sync_path(db.get_mut(), &path);
        }

        Ok(())
    }
}
|
|
@ -0,0 +1,43 @@
|
||||||
|
use lsp_types::notification::DidOpenTextDocument;
|
||||||
|
use lsp_types::DidOpenTextDocumentParams;
|
||||||
|
|
||||||
|
use ruff_db::files::system_path_to_file;
|
||||||
|
|
||||||
|
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||||
|
use crate::server::client::{Notifier, Requester};
|
||||||
|
use crate::server::Result;
|
||||||
|
use crate::session::Session;
|
||||||
|
use crate::system::url_to_system_path;
|
||||||
|
use crate::TextDocument;
|
||||||
|
|
||||||
|
pub(crate) struct DidOpenTextDocumentHandler;
|
||||||
|
|
||||||
|
impl NotificationHandler for DidOpenTextDocumentHandler {
|
||||||
|
type NotificationType = DidOpenTextDocument;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SyncNotificationHandler for DidOpenTextDocumentHandler {
|
||||||
|
fn run(
|
||||||
|
session: &mut Session,
|
||||||
|
_notifier: Notifier,
|
||||||
|
_requester: &mut Requester,
|
||||||
|
params: DidOpenTextDocumentParams,
|
||||||
|
) -> Result<()> {
|
||||||
|
let Ok(path) = url_to_system_path(¶ms.text_document.uri) else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let document = TextDocument::new(params.text_document.text, params.text_document.version);
|
||||||
|
session.open_text_document(params.text_document.uri, document);
|
||||||
|
|
||||||
|
if let Some(db) = session.workspace_db_for_path_mut(path.as_std_path()) {
|
||||||
|
// TODO(dhruvmanila): Store the `file` in `DocumentController`
|
||||||
|
let file = system_path_to_file(&**db, &path).unwrap();
|
||||||
|
file.sync(db.get_mut());
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(dhruvmanila): Publish diagnostics if the client doesn't support pull diagnostics
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,51 @@
|
||||||
|
use lsp_server::ErrorCode;
|
||||||
|
use lsp_types::notification::DidOpenNotebookDocument;
|
||||||
|
use lsp_types::DidOpenNotebookDocumentParams;
|
||||||
|
|
||||||
|
use ruff_db::files::system_path_to_file;
|
||||||
|
|
||||||
|
use crate::edit::NotebookDocument;
|
||||||
|
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||||
|
use crate::server::api::LSPResult;
|
||||||
|
use crate::server::client::{Notifier, Requester};
|
||||||
|
use crate::server::Result;
|
||||||
|
use crate::session::Session;
|
||||||
|
use crate::system::url_to_system_path;
|
||||||
|
|
||||||
|
pub(crate) struct DidOpenNotebookHandler;
|
||||||
|
|
||||||
|
impl NotificationHandler for DidOpenNotebookHandler {
|
||||||
|
type NotificationType = DidOpenNotebookDocument;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SyncNotificationHandler for DidOpenNotebookHandler {
|
||||||
|
fn run(
|
||||||
|
session: &mut Session,
|
||||||
|
_notifier: Notifier,
|
||||||
|
_requester: &mut Requester,
|
||||||
|
params: DidOpenNotebookDocumentParams,
|
||||||
|
) -> Result<()> {
|
||||||
|
let Ok(path) = url_to_system_path(¶ms.notebook_document.uri) else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let notebook = NotebookDocument::new(
|
||||||
|
params.notebook_document.version,
|
||||||
|
params.notebook_document.cells,
|
||||||
|
params.notebook_document.metadata.unwrap_or_default(),
|
||||||
|
params.cell_text_documents,
|
||||||
|
)
|
||||||
|
.with_failure_code(ErrorCode::InternalError)?;
|
||||||
|
session.open_notebook_document(params.notebook_document.uri.clone(), notebook);
|
||||||
|
|
||||||
|
if let Some(db) = session.workspace_db_for_path_mut(path.as_std_path()) {
|
||||||
|
// TODO(dhruvmanila): Store the `file` in `DocumentController`
|
||||||
|
let file = system_path_to_file(&**db, &path).unwrap();
|
||||||
|
file.sync(db.get_mut());
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(dhruvmanila): Publish diagnostics if the client doesn't support pull diagnostics
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,25 @@
|
||||||
|
use lsp_types::notification::SetTrace;
|
||||||
|
use lsp_types::SetTraceParams;
|
||||||
|
|
||||||
|
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||||
|
use crate::server::client::{Notifier, Requester};
|
||||||
|
use crate::server::Result;
|
||||||
|
use crate::session::Session;
|
||||||
|
|
||||||
|
/// Handles `$/setTrace`.
pub(crate) struct SetTraceHandler;

impl NotificationHandler for SetTraceHandler {
    type NotificationType = SetTrace;
}

impl SyncNotificationHandler for SetTraceHandler {
    /// Updates the server's trace verbosity to the value requested by the
    /// client.
    fn run(
        _session: &mut Session,
        _notifier: Notifier,
        _requester: &mut Requester,
        params: SetTraceParams,
    ) -> Result<()> {
        crate::trace::set_trace_value(params.value);
        Ok(())
    }
}
|
3
crates/red_knot_server/src/server/api/requests.rs
Normal file
3
crates/red_knot_server/src/server/api/requests.rs
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
// Request handlers, one submodule per LSP request kind.
mod diagnostic;

pub(super) use diagnostic::DocumentDiagnosticRequestHandler;
|
71
crates/red_knot_server/src/server/api/requests/diagnostic.rs
Normal file
71
crates/red_knot_server/src/server/api/requests/diagnostic.rs
Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
use std::borrow::Cow;
|
||||||
|
|
||||||
|
use lsp_types::request::DocumentDiagnosticRequest;
|
||||||
|
use lsp_types::{
|
||||||
|
Diagnostic, DocumentDiagnosticParams, DocumentDiagnosticReport, DocumentDiagnosticReportResult,
|
||||||
|
FullDocumentDiagnosticReport, Range, RelatedFullDocumentDiagnosticReport, Url,
|
||||||
|
};
|
||||||
|
|
||||||
|
use red_knot_workspace::db::RootDatabase;
|
||||||
|
|
||||||
|
use crate::server::api::traits::{BackgroundDocumentRequestHandler, RequestHandler};
|
||||||
|
use crate::server::{client::Notifier, Result};
|
||||||
|
use crate::session::DocumentSnapshot;
|
||||||
|
|
||||||
|
/// Handles `textDocument/diagnostic` (pull-model diagnostics).
pub(crate) struct DocumentDiagnosticRequestHandler;

impl RequestHandler for DocumentDiagnosticRequestHandler {
    type RequestType = DocumentDiagnosticRequest;
}
|
||||||
|
|
||||||
|
impl BackgroundDocumentRequestHandler for DocumentDiagnosticRequestHandler {
|
||||||
|
fn document_url(params: &DocumentDiagnosticParams) -> Cow<Url> {
|
||||||
|
Cow::Borrowed(¶ms.text_document.uri)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn run_with_snapshot(
|
||||||
|
snapshot: DocumentSnapshot,
|
||||||
|
db: Option<salsa::Handle<RootDatabase>>,
|
||||||
|
_notifier: Notifier,
|
||||||
|
_params: DocumentDiagnosticParams,
|
||||||
|
) -> Result<DocumentDiagnosticReportResult> {
|
||||||
|
let diagnostics = db
|
||||||
|
.map(|db| compute_diagnostics(&snapshot, &db))
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
Ok(DocumentDiagnosticReportResult::Report(
|
||||||
|
DocumentDiagnosticReport::Full(RelatedFullDocumentDiagnosticReport {
|
||||||
|
related_documents: None,
|
||||||
|
full_document_diagnostic_report: FullDocumentDiagnosticReport {
|
||||||
|
result_id: None,
|
||||||
|
items: diagnostics,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compute_diagnostics(snapshot: &DocumentSnapshot, db: &RootDatabase) -> Vec<Diagnostic> {
|
||||||
|
let Some(file) = snapshot.file(db) else {
|
||||||
|
return vec![];
|
||||||
|
};
|
||||||
|
let Ok(diagnostics) = db.check_file(file) else {
|
||||||
|
return vec![];
|
||||||
|
};
|
||||||
|
|
||||||
|
diagnostics
|
||||||
|
.as_slice()
|
||||||
|
.iter()
|
||||||
|
.map(|message| Diagnostic {
|
||||||
|
range: Range::default(),
|
||||||
|
severity: None,
|
||||||
|
tags: None,
|
||||||
|
code: None,
|
||||||
|
code_description: None,
|
||||||
|
source: Some("red-knot".into()),
|
||||||
|
message: message.to_string(),
|
||||||
|
related_information: None,
|
||||||
|
data: None,
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
77
crates/red_knot_server/src/server/api/traits.rs
Normal file
77
crates/red_knot_server/src/server/api/traits.rs
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
//! A stateful LSP implementation that calls into the Ruff API.
|
||||||
|
|
||||||
|
use crate::server::client::{Notifier, Requester};
|
||||||
|
use crate::session::{DocumentSnapshot, Session};
|
||||||
|
|
||||||
|
use lsp_types::notification::Notification as LSPNotification;
|
||||||
|
use lsp_types::request::Request;
|
||||||
|
use red_knot_workspace::db::RootDatabase;
|
||||||
|
|
||||||
|
/// A supertrait for any server request handler.
pub(super) trait RequestHandler {
    /// The LSP request type this handler services.
    type RequestType: Request;
    /// The LSP method name, derived from the request type.
    const METHOD: &'static str = <<Self as RequestHandler>::RequestType as Request>::METHOD;
}

/// A request handler that needs mutable access to the session.
/// This will block the main message receiver loop, meaning that no
/// incoming requests or notifications will be handled while `run` is
/// executing. Try to avoid doing any I/O or long-running computations.
pub(super) trait SyncRequestHandler: RequestHandler {
    fn run(
        session: &mut Session,
        notifier: Notifier,
        requester: &mut Requester,
        params: <<Self as RequestHandler>::RequestType as Request>::Params,
    ) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
}

/// A request handler that can be run on a background thread.
pub(super) trait BackgroundDocumentRequestHandler: RequestHandler {
    /// Extracts the URL of the document this request targets
    /// (presumably used to snapshot that document before scheduling —
    /// see the request dispatcher).
    fn document_url(
        params: &<<Self as RequestHandler>::RequestType as Request>::Params,
    ) -> std::borrow::Cow<lsp_types::Url>;

    /// Runs off the main loop with a document snapshot and, when one is
    /// available, a handle to the workspace database.
    fn run_with_snapshot(
        snapshot: DocumentSnapshot,
        db: Option<salsa::Handle<RootDatabase>>,
        notifier: Notifier,
        params: <<Self as RequestHandler>::RequestType as Request>::Params,
    ) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
}
|
||||||
|
|
||||||
|
/// A supertrait for any server notification handler.
pub(super) trait NotificationHandler {
    /// The LSP notification type this handler services.
    type NotificationType: LSPNotification;
    /// The LSP method name, derived from the notification type.
    const METHOD: &'static str =
        <<Self as NotificationHandler>::NotificationType as LSPNotification>::METHOD;
}

/// A notification handler that needs mutable access to the session.
/// This will block the main message receiver loop, meaning that no
/// incoming requests or notifications will be handled while `run` is
/// executing. Try to avoid doing any I/O or long-running computations.
pub(super) trait SyncNotificationHandler: NotificationHandler {
    fn run(
        session: &mut Session,
        notifier: Notifier,
        requester: &mut Requester,
        params: <<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
    ) -> super::Result<()>;
}

/// A notification handler that can be run on a background thread.
pub(super) trait BackgroundDocumentNotificationHandler: NotificationHandler {
    /// `document_url` can be implemented automatically with
    /// `define_document_url!(params: &<YourParameterType>)` in the trait
    /// implementation.
    fn document_url(
        params: &<<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
    ) -> std::borrow::Cow<lsp_types::Url>;

    /// Runs off the main loop with a read-only snapshot of the document.
    fn run_with_snapshot(
        snapshot: DocumentSnapshot,
        notifier: Notifier,
        params: <<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
    ) -> super::Result<()>;
}
|
169
crates/red_knot_server/src/server/client.rs
Normal file
169
crates/red_knot_server/src/server/client.rs
Normal file
|
@ -0,0 +1,169 @@
|
||||||
|
use std::any::TypeId;
|
||||||
|
|
||||||
|
use lsp_server::{Notification, RequestId};
|
||||||
|
use rustc_hash::FxHashMap;
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use super::{schedule::Task, ClientSender};
|
||||||
|
|
||||||
|
/// Builds the [`Task`] to dispatch once a client response arrives.
type ResponseBuilder<'s> = Box<dyn FnOnce(lsp_server::Response) -> Task<'s>>;

/// Facade over the client channel, split into notification, response,
/// and request roles.
pub(crate) struct Client<'s> {
    notifier: Notifier,
    responder: Responder,
    pub(super) requester: Requester<'s>,
}

/// Sends notifications to the client.
#[derive(Clone)]
pub(crate) struct Notifier(ClientSender);

/// Sends responses to client-initiated requests.
#[derive(Clone)]
pub(crate) struct Responder(ClientSender);

/// Sends server-initiated requests and tracks their pending response handlers.
pub(crate) struct Requester<'s> {
    sender: ClientSender,
    // Id to assign to the next outgoing request; incremented after each send.
    next_request_id: i32,
    // Handlers keyed by the id of the request they are waiting on.
    response_handlers: FxHashMap<lsp_server::RequestId, ResponseBuilder<'s>>,
}
|
||||||
|
|
||||||
|
impl<'s> Client<'s> {
|
||||||
|
pub(super) fn new(sender: ClientSender) -> Self {
|
||||||
|
Self {
|
||||||
|
notifier: Notifier(sender.clone()),
|
||||||
|
responder: Responder(sender.clone()),
|
||||||
|
requester: Requester {
|
||||||
|
sender,
|
||||||
|
next_request_id: 1,
|
||||||
|
response_handlers: FxHashMap::default(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn notifier(&self) -> Notifier {
|
||||||
|
self.notifier.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn responder(&self) -> Responder {
|
||||||
|
self.responder.clone()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)] // we'll need to use `Notifier` in the future
|
||||||
|
impl Notifier {
|
||||||
|
pub(crate) fn notify<N>(&self, params: N::Params) -> crate::Result<()>
|
||||||
|
where
|
||||||
|
N: lsp_types::notification::Notification,
|
||||||
|
{
|
||||||
|
let method = N::METHOD.to_string();
|
||||||
|
|
||||||
|
let message = lsp_server::Message::Notification(Notification::new(method, params));
|
||||||
|
|
||||||
|
self.0.send(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn notify_method(&self, method: String) -> crate::Result<()> {
|
||||||
|
self.0
|
||||||
|
.send(lsp_server::Message::Notification(Notification::new(
|
||||||
|
method,
|
||||||
|
Value::Null,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Responder {
|
||||||
|
pub(crate) fn respond<R>(
|
||||||
|
&self,
|
||||||
|
id: RequestId,
|
||||||
|
result: crate::server::Result<R>,
|
||||||
|
) -> crate::Result<()>
|
||||||
|
where
|
||||||
|
R: serde::Serialize,
|
||||||
|
{
|
||||||
|
self.0.send(
|
||||||
|
match result {
|
||||||
|
Ok(res) => lsp_server::Response::new_ok(id, res),
|
||||||
|
Err(crate::server::api::Error { code, error }) => {
|
||||||
|
lsp_server::Response::new_err(id, code as i32, format!("{error}"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.into(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'s> Requester<'s> {
    /// Sends a request of kind `R` to the client, with associated parameters.
    /// The task provided by `response_handler` will be dispatched as soon as the response
    /// comes back from the client.
    pub(crate) fn request<R>(
        &mut self,
        params: R::Params,
        response_handler: impl Fn(R::Result) -> Task<'s> + 'static,
    ) -> crate::Result<()>
    where
        R: lsp_types::request::Request,
    {
        let serialized_params = serde_json::to_value(params)?;

        // Register the handler under the id we're about to use so that a
        // response arriving later can be routed back via `pop_response_task`.
        self.response_handlers.insert(
            self.next_request_id.into(),
            Box::new(move |response: lsp_server::Response| {
                match (response.error, response.result) {
                    // Client reported an error: log it, run nothing.
                    (Some(err), _) => {
                        tracing::error!(
                            "Got an error from the client (code {}): {}",
                            err.code,
                            err.message
                        );
                        Task::nothing()
                    }
                    // Normal result payload: deserialize and hand to the handler.
                    (None, Some(response)) => match serde_json::from_value(response) {
                        Ok(response) => response_handler(response),
                        Err(error) => {
                            tracing::error!("Failed to deserialize response from server: {error}");
                            Task::nothing()
                        }
                    },
                    // Neither result nor error: only legal when `R::Result` is `()`.
                    (None, None) => {
                        if TypeId::of::<R::Result>() == TypeId::of::<()>() {
                            // We can't call `response_handler(())` directly here, but
                            // since we _know_ the type expected is `()`, we can use
                            // `from_value(Value::Null)`. `R::Result` implements `DeserializeOwned`,
                            // so this branch works in the general case but we'll only
                            // hit it if the concrete type is `()`, so the `unwrap()` is safe here.
                            response_handler(serde_json::from_value(Value::Null).unwrap());
                        } else {
                            tracing::error!(
                                "Server response was invalid: did not contain a result or error"
                            );
                        }
                        Task::nothing()
                    }
                }
            }),
        );

        self.sender
            .send(lsp_server::Message::Request(lsp_server::Request {
                id: self.next_request_id.into(),
                method: R::METHOD.into(),
                params: serialized_params,
            }))?;

        // Only advance the id once the request was actually sent.
        self.next_request_id += 1;

        Ok(())
    }

    /// Looks up and runs the handler registered for `response`'s request id,
    /// or logs an error and returns a no-op task when no handler is pending.
    pub(crate) fn pop_response_task(&mut self, response: lsp_server::Response) -> Task<'s> {
        if let Some(handler) = self.response_handlers.remove(&response.id) {
            handler(response)
        } else {
            tracing::error!(
                "Received a response with ID {}, which was not expected",
                response.id
            );
            Task::nothing()
        }
    }
}
|
144
crates/red_knot_server/src/server/connection.rs
Normal file
144
crates/red_knot_server/src/server/connection.rs
Normal file
|
@ -0,0 +1,144 @@
|
||||||
|
use lsp_server as lsp;
|
||||||
|
use lsp_types::{notification::Notification, request::Request};
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
|
||||||
|
// Channel halves used to talk to the `lsp_server` I/O threads.
type ConnectionSender = crossbeam::channel::Sender<lsp::Message>;
type ConnectionReceiver = crossbeam::channel::Receiver<lsp::Message>;

/// A builder for `Connection` that handles LSP initialization.
pub(crate) struct ConnectionInitializer {
    connection: lsp::Connection,
    // I/O threads owned by `lsp_server`; carried along so the final
    // `Connection` can join them on shutdown.
    threads: lsp::IoThreads,
}

/// Handles inbound and outbound messages with the client.
pub(crate) struct Connection {
    // `Arc` so that `ClientSender` handles can hold a `Weak` reference and
    // fail gracefully after the connection is closed.
    sender: Arc<ConnectionSender>,
    receiver: ConnectionReceiver,
    threads: lsp::IoThreads,
}
|
||||||
|
|
||||||
|
impl ConnectionInitializer {
|
||||||
|
/// Create a new LSP server connection over stdin/stdout.
|
||||||
|
pub(super) fn stdio() -> Self {
|
||||||
|
let (connection, threads) = lsp::Connection::stdio();
|
||||||
|
Self {
|
||||||
|
connection,
|
||||||
|
threads,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Starts the initialization process with the client by listening for an initialization request.
|
||||||
|
/// Returns a request ID that should be passed into `initialize_finish` later,
|
||||||
|
/// along with the initialization parameters that were provided.
|
||||||
|
pub(super) fn initialize_start(
|
||||||
|
&self,
|
||||||
|
) -> crate::Result<(lsp::RequestId, lsp_types::InitializeParams)> {
|
||||||
|
let (id, params) = self.connection.initialize_start()?;
|
||||||
|
Ok((id, serde_json::from_value(params)?))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Finishes the initialization process with the client,
|
||||||
|
/// returning an initialized `Connection`.
|
||||||
|
pub(super) fn initialize_finish(
|
||||||
|
self,
|
||||||
|
id: lsp::RequestId,
|
||||||
|
server_capabilities: &lsp_types::ServerCapabilities,
|
||||||
|
name: &str,
|
||||||
|
version: &str,
|
||||||
|
) -> crate::Result<Connection> {
|
||||||
|
self.connection.initialize_finish(
|
||||||
|
id,
|
||||||
|
serde_json::json!({
|
||||||
|
"capabilities": server_capabilities,
|
||||||
|
"serverInfo": {
|
||||||
|
"name": name,
|
||||||
|
"version": version
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
)?;
|
||||||
|
let Self {
|
||||||
|
connection: lsp::Connection { sender, receiver },
|
||||||
|
threads,
|
||||||
|
} = self;
|
||||||
|
Ok(Connection {
|
||||||
|
sender: Arc::new(sender),
|
||||||
|
receiver,
|
||||||
|
threads,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Connection {
    /// Make a new `ClientSender` for sending messages to the client.
    pub(super) fn make_sender(&self) -> ClientSender {
        ClientSender {
            weak_sender: Arc::downgrade(&self.sender),
        }
    }

    /// An iterator over incoming messages from the client.
    pub(super) fn incoming(&self) -> crossbeam::channel::Iter<lsp::Message> {
        self.receiver.iter()
    }

    /// Check and respond to any incoming shutdown requests; returns `true` if the server should be shutdown.
    pub(super) fn handle_shutdown(&self, message: &lsp::Message) -> crate::Result<bool> {
        match message {
            lsp::Message::Request(lsp::Request { id, method, .. })
                if method == lsp_types::request::Shutdown::METHOD =>
            {
                // Acknowledge the shutdown request, then wait (with a bound)
                // for the `exit` notification the protocol requires next.
                self.sender
                    .send(lsp::Response::new_ok(id.clone(), ()).into())?;
                tracing::info!("Shutdown request received. Waiting for an exit notification...");
                match self.receiver.recv_timeout(std::time::Duration::from_secs(30))? {
                    lsp::Message::Notification(lsp::Notification { method, .. }) if method == lsp_types::notification::Exit::METHOD => {
                        tracing::info!("Exit notification received. Server shutting down...");
                        Ok(true)
                    },
                    message => anyhow::bail!("Server received unexpected message {message:?} while waiting for exit notification")
                }
            }
            lsp::Message::Notification(lsp::Notification { method, .. })
                if method == lsp_types::notification::Exit::METHOD =>
            {
                // `exit` without a prior `shutdown` request is a protocol
                // violation by the client, but we still exit.
                tracing::error!("Server received an exit notification before a shutdown request was sent. Exiting...");
                Ok(true)
            }
            _ => Ok(false),
        }
    }

    /// Join the I/O threads that underpin this connection.
    /// This is guaranteed to be nearly immediate since
    /// we close the only active channels to these threads prior
    /// to joining them.
    pub(super) fn close(self) -> crate::Result<()> {
        // Dropping the last strong sender and the receiver closes both
        // channels, letting the I/O threads terminate.
        std::mem::drop(
            Arc::into_inner(self.sender)
                .expect("the client sender shouldn't have more than one strong reference"),
        );
        std::mem::drop(self.receiver);
        self.threads.join()?;
        Ok(())
    }
}
|
||||||
|
|
||||||
|
/// A weak reference to an underlying sender channel, used for communication with the client.
|
||||||
|
/// If the `Connection` that created this `ClientSender` is dropped, any `send` calls will throw
|
||||||
|
/// an error.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub(crate) struct ClientSender {
|
||||||
|
weak_sender: Weak<ConnectionSender>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// note: additional wrapper functions for senders may be implemented as needed.
|
||||||
|
impl ClientSender {
|
||||||
|
pub(crate) fn send(&self, msg: lsp::Message) -> crate::Result<()> {
|
||||||
|
let Some(sender) = self.weak_sender.upgrade() else {
|
||||||
|
anyhow::bail!("The connection with the client has been closed");
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(sender.send(msg)?)
|
||||||
|
}
|
||||||
|
}
|
112
crates/red_knot_server/src/server/schedule.rs
Normal file
112
crates/red_knot_server/src/server/schedule.rs
Normal file
|
@ -0,0 +1,112 @@
|
||||||
|
use std::num::NonZeroUsize;
|
||||||
|
|
||||||
|
use crate::session::Session;
|
||||||
|
|
||||||
|
mod task;
|
||||||
|
mod thread;
|
||||||
|
|
||||||
|
pub(super) use task::{BackgroundSchedule, Task};
|
||||||
|
|
||||||
|
use self::{
|
||||||
|
task::{BackgroundTaskBuilder, SyncTask},
|
||||||
|
thread::ThreadPriority,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{client::Client, ClientSender};
|
||||||
|
|
||||||
|
/// The event loop thread is actually a secondary thread that we spawn from the
|
||||||
|
/// _actual_ main thread. This secondary thread has a larger stack size
|
||||||
|
/// than some OS defaults (Windows, for example) and is also designated as
|
||||||
|
/// high-priority.
|
||||||
|
pub(crate) fn event_loop_thread(
|
||||||
|
func: impl FnOnce() -> crate::Result<()> + Send + 'static,
|
||||||
|
) -> crate::Result<thread::JoinHandle<crate::Result<()>>> {
|
||||||
|
// Override OS defaults to avoid stack overflows on platforms with low stack size defaults.
|
||||||
|
const MAIN_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024;
|
||||||
|
const MAIN_THREAD_NAME: &str = "ruff:main";
|
||||||
|
Ok(
|
||||||
|
thread::Builder::new(thread::ThreadPriority::LatencySensitive)
|
||||||
|
.name(MAIN_THREAD_NAME.into())
|
||||||
|
.stack_size(MAIN_THREAD_STACK_SIZE)
|
||||||
|
.spawn(func)?,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Owns the session and the thread pools, and decides where each [`Task`] runs.
pub(crate) struct Scheduler<'s> {
    session: &'s mut Session,
    client: Client<'s>,
    // Dedicated pool for formatting tasks (see `BackgroundSchedule::Fmt`).
    fmt_pool: thread::Pool,
    // General-purpose pool for all other background tasks.
    background_pool: thread::Pool,
}
|
||||||
|
|
||||||
|
impl<'s> Scheduler<'s> {
    pub(super) fn new(
        session: &'s mut Session,
        worker_threads: NonZeroUsize,
        sender: ClientSender,
    ) -> Self {
        // The formatting pool is a single dedicated thread.
        const FMT_THREADS: usize = 1;
        Self {
            session,
            fmt_pool: thread::Pool::new(NonZeroUsize::try_from(FMT_THREADS).unwrap()),
            background_pool: thread::Pool::new(worker_threads),
            client: Client::new(sender),
        }
    }

    /// Immediately sends a request of kind `R` to the client, with associated parameters.
    /// The task provided by `response_handler` will be dispatched as soon as the response
    /// comes back from the client.
    pub(super) fn request<R>(
        &mut self,
        params: R::Params,
        response_handler: impl Fn(R::Result) -> Task<'s> + 'static,
    ) -> crate::Result<()>
    where
        R: lsp_types::request::Request,
    {
        self.client.requester.request::<R>(params, response_handler)
    }

    /// Creates a task to handle a response from the client.
    pub(super) fn response(&mut self, response: lsp_server::Response) -> Task<'s> {
        self.client.requester.pop_response_task(response)
    }

    /// Dispatches a `task` by either running it as a blocking function or
    /// executing it on a background thread pool.
    pub(super) fn dispatch(&mut self, task: task::Task<'s>) {
        match task {
            Task::Sync(SyncTask { func }) => {
                // Sync tasks run inline on this thread with exclusive
                // (mutable) access to the session.
                let notifier = self.client.notifier();
                let responder = self.client.responder();
                func(
                    self.session,
                    notifier,
                    &mut self.client.requester,
                    responder,
                );
            }
            Task::Background(BackgroundTaskBuilder {
                schedule,
                builder: func,
            }) => {
                // Run the builder now (while we can still borrow the session)
                // to obtain the 'static closure, then hand that closure to
                // the pool selected by `schedule`.
                let static_func = func(self.session);
                let notifier = self.client.notifier();
                let responder = self.client.responder();
                let task = move || static_func(notifier, responder);
                match schedule {
                    BackgroundSchedule::Worker => {
                        self.background_pool.spawn(ThreadPriority::Worker, task);
                    }
                    BackgroundSchedule::LatencySensitive => self
                        .background_pool
                        .spawn(ThreadPriority::LatencySensitive, task),
                    BackgroundSchedule::Fmt => {
                        self.fmt_pool.spawn(ThreadPriority::LatencySensitive, task);
                    }
                }
            }
        }
    }
}
|
95
crates/red_knot_server/src/server/schedule/task.rs
Normal file
95
crates/red_knot_server/src/server/schedule/task.rs
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
use lsp_server::RequestId;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
server::client::{Notifier, Requester, Responder},
|
||||||
|
session::Session,
|
||||||
|
};
|
||||||
|
|
||||||
|
// A task body that runs on the main loop thread with mutable session access.
type LocalFn<'s> = Box<dyn FnOnce(&mut Session, Notifier, &mut Requester, Responder) + 's>;

// A task body that runs on a background thread; must be 'static and Send.
type BackgroundFn = Box<dyn FnOnce(Notifier, Responder) + Send + 'static>;

// Borrows the session synchronously to produce the 'static background body.
type BackgroundFnBuilder<'s> = Box<dyn FnOnce(&Session) -> BackgroundFn + 's>;

/// Describes how the task should be run.
#[derive(Clone, Copy, Debug, Default)]
pub(in crate::server) enum BackgroundSchedule {
    /// The task should be run on the background thread designated
    /// for formatting actions. This is a high priority thread.
    Fmt,
    /// The task should be run on the general high-priority background
    /// thread.
    LatencySensitive,
    /// The task should be run on a regular-priority background thread.
    #[default]
    Worker,
}
|
||||||
|
|
||||||
|
/// A [`Task`] is a future that has not yet started, and it is the job of
/// the [`super::Scheduler`] to make that happen, via [`super::Scheduler::dispatch`].
/// A task can either run on the main thread (in other words, the same thread as the
/// scheduler) or it can run in a background thread. The main difference between
/// the two is that background threads only have a read-only snapshot of the session,
/// while local tasks have exclusive access and can modify it as they please. Keep in mind that
/// local tasks will **block** the main event loop, so only use local tasks if you **need**
/// mutable state access or you need the absolute lowest latency possible.
pub(in crate::server) enum Task<'s> {
    Background(BackgroundTaskBuilder<'s>),
    Sync(SyncTask<'s>),
}

// The reason why this isn't just a 'static background closure
// is because we need to take a snapshot of the session before sending
// this task to the background, and the inner closure can't take the session
// as an immutable reference since it's used mutably elsewhere. So instead,
// a background task is built using an outer closure that borrows the session to take a snapshot,
// that the inner closure can capture. This builder closure has a lifetime linked to the scheduler.
// When the task is dispatched, the scheduler runs the synchronous builder, which takes the session
// as a reference, to create the inner 'static closure. That closure is then moved to a background task pool.
pub(in crate::server) struct BackgroundTaskBuilder<'s> {
    pub(super) schedule: BackgroundSchedule,
    pub(super) builder: BackgroundFnBuilder<'s>,
}

/// A task that runs synchronously on the main loop thread.
pub(in crate::server) struct SyncTask<'s> {
    pub(super) func: LocalFn<'s>,
}
|
||||||
|
|
||||||
|
impl<'s> Task<'s> {
|
||||||
|
/// Creates a new background task.
|
||||||
|
pub(crate) fn background(
|
||||||
|
schedule: BackgroundSchedule,
|
||||||
|
func: impl FnOnce(&Session) -> Box<dyn FnOnce(Notifier, Responder) + Send + 'static> + 's,
|
||||||
|
) -> Self {
|
||||||
|
Self::Background(BackgroundTaskBuilder {
|
||||||
|
schedule,
|
||||||
|
builder: Box::new(func),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
/// Creates a new local task.
|
||||||
|
pub(crate) fn local(
|
||||||
|
func: impl FnOnce(&mut Session, Notifier, &mut Requester, Responder) + 's,
|
||||||
|
) -> Self {
|
||||||
|
Self::Sync(SyncTask {
|
||||||
|
func: Box::new(func),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
/// Creates a local task that immediately
|
||||||
|
/// responds with the provided `request`.
|
||||||
|
pub(crate) fn immediate<R>(id: RequestId, result: crate::server::Result<R>) -> Self
|
||||||
|
where
|
||||||
|
R: Serialize + Send + 'static,
|
||||||
|
{
|
||||||
|
Self::local(move |_, _, _, responder| {
|
||||||
|
if let Err(err) = responder.respond(id, result) {
|
||||||
|
tracing::error!("Unable to send immediate response: {err}");
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a local task that does nothing.
|
||||||
|
pub(crate) fn nothing() -> Self {
|
||||||
|
Self::local(move |_, _, _, _| {})
|
||||||
|
}
|
||||||
|
}
|
109
crates/red_knot_server/src/server/schedule/thread.rs
Normal file
109
crates/red_knot_server/src/server/schedule/thread.rs
Normal file
|
@ -0,0 +1,109 @@
|
||||||
|
// +------------------------------------------------------------+
|
||||||
|
// | Code adopted from: |
|
||||||
|
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
|
||||||
|
// | File: `crates/stdx/src/thread.rs` |
|
||||||
|
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
|
||||||
|
// +------------------------------------------------------------+
|
||||||
|
//! A utility module for working with threads that automatically joins threads upon drop
|
||||||
|
//! and abstracts over operating system quality of service (QoS) APIs
|
||||||
|
//! through the concept of a “thread priority”.
|
||||||
|
//!
|
||||||
|
//! The priority of a thread is frozen at thread creation time,
|
||||||
|
//! i.e. there is no API to change the priority of a thread once it has been spawned.
|
||||||
|
//!
|
||||||
|
//! As a system, rust-analyzer should have the property that
|
||||||
|
//! old manual scheduling APIs are replaced entirely by QoS.
|
||||||
|
//! To maintain this invariant, we panic when it is clear that
|
||||||
|
//! old scheduling APIs have been used.
|
||||||
|
//!
|
||||||
|
//! Moreover, we also want to ensure that every thread has an priority set explicitly
|
||||||
|
//! to force a decision about its importance to the system.
|
||||||
|
//! Thus, [`ThreadPriority`] has no default value
|
||||||
|
//! and every entry point to creating a thread requires a [`ThreadPriority`] upfront.
|
||||||
|
|
||||||
|
// Keeps us from getting warnings about the word `QoS`
|
||||||
|
#![allow(clippy::doc_markdown)]
|
||||||
|
|
||||||
|
use std::fmt;
|
||||||
|
|
||||||
|
mod pool;
|
||||||
|
mod priority;
|
||||||
|
|
||||||
|
pub(super) use pool::Pool;
|
||||||
|
pub(super) use priority::ThreadPriority;
|
||||||
|
|
||||||
|
/// A thread builder that pairs `jod_thread::Builder` with the
/// [`ThreadPriority`] to apply once the thread starts.
pub(super) struct Builder {
    priority: ThreadPriority,
    inner: jod_thread::Builder,
}
|
||||||
|
|
||||||
|
impl Builder {
|
||||||
|
pub(super) fn new(priority: ThreadPriority) -> Builder {
|
||||||
|
Builder {
|
||||||
|
priority,
|
||||||
|
inner: jod_thread::Builder::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn name(self, name: String) -> Builder {
|
||||||
|
Builder {
|
||||||
|
inner: self.inner.name(name),
|
||||||
|
..self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn stack_size(self, size: usize) -> Builder {
|
||||||
|
Builder {
|
||||||
|
inner: self.inner.stack_size(size),
|
||||||
|
..self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn spawn<F, T>(self, f: F) -> std::io::Result<JoinHandle<T>>
|
||||||
|
where
|
||||||
|
F: FnOnce() -> T,
|
||||||
|
F: Send + 'static,
|
||||||
|
T: Send + 'static,
|
||||||
|
{
|
||||||
|
let inner_handle = self.inner.spawn(move || {
|
||||||
|
self.priority.apply_to_current_thread();
|
||||||
|
f()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(JoinHandle {
|
||||||
|
inner: Some(inner_handle),
|
||||||
|
allow_leak: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A join handle whose underlying thread is joined on drop unless
/// `allow_leak` is set.
pub(crate) struct JoinHandle<T = ()> {
    // `inner` is an `Option` so that we can
    // take ownership of the contained `JoinHandle`.
    inner: Option<jod_thread::JoinHandle<T>>,
    // When `true`, dropping this handle detaches the thread instead of
    // joining it.
    allow_leak: bool,
}
|
||||||
|
|
||||||
|
impl<T> JoinHandle<T> {
    /// Blocks until the thread finishes and returns its result.
    pub(crate) fn join(mut self) -> T {
        self.inner.take().unwrap().join()
    }
}

impl<T> Drop for JoinHandle<T> {
    fn drop(&mut self) {
        // Default case (`allow_leak == false`): nothing to do here — dropping
        // the inner `jod_thread::JoinHandle` joins the thread.
        if !self.allow_leak {
            return;
        }

        // Leaking is allowed: detach so the drop doesn't block.
        if let Some(join_handle) = self.inner.take() {
            join_handle.detach();
        }
    }
}

impl<T> fmt::Debug for JoinHandle<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("JoinHandle { .. }")
    }
}
|
113
crates/red_knot_server/src/server/schedule/thread/pool.rs
Normal file
113
crates/red_knot_server/src/server/schedule/thread/pool.rs
Normal file
|
@ -0,0 +1,113 @@
|
||||||
|
// +------------------------------------------------------------+
|
||||||
|
// | Code adopted from: |
|
||||||
|
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
|
||||||
|
// | File: `crates/stdx/src/thread/pool.rs` |
|
||||||
|
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
|
||||||
|
// +------------------------------------------------------------+
|
||||||
|
//! [`Pool`] implements a basic custom thread pool
|
||||||
|
//! inspired by the [`threadpool` crate](http://docs.rs/threadpool).
|
||||||
|
//! When you spawn a task you specify a thread priority
|
||||||
|
//! so the pool can schedule it to run on a thread with that priority.
|
||||||
|
//! rust-analyzer uses this to prioritize work based on latency requirements.
|
||||||
|
//!
|
||||||
|
//! The thread pool is implemented entirely using
|
||||||
|
//! the threading utilities in [`crate::server::schedule::thread`].
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
num::NonZeroUsize,
|
||||||
|
sync::{
|
||||||
|
atomic::{AtomicUsize, Ordering},
|
||||||
|
Arc,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crossbeam::channel::{Receiver, Sender};
|
||||||
|
|
||||||
|
use super::{Builder, JoinHandle, ThreadPriority};
|
||||||
|
|
||||||
|
/// A basic thread pool that dispatches [`Job`]s to a fixed set of worker
/// threads through a bounded crossbeam channel.
pub(crate) struct Pool {
    // `_handles` is never read: the field is present
    // only for its `Drop` impl.

    // The worker threads exit once the channel closes;
    // make sure to keep `job_sender` above `handles`
    // so that the channel is actually closed
    // before we join the worker threads!
    // (Struct fields drop in declaration order.)
    job_sender: Sender<Job>,
    _handles: Vec<JoinHandle>,

    /// Count of jobs currently executing, reported by [`Pool::len`].
    extant_tasks: Arc<AtomicUsize>,
}
|
||||||
|
|
||||||
|
/// A unit of work submitted to the [`Pool`], paired with the thread
/// priority it should run at.
struct Job {
    /// Priority the worker thread switches to before running `f`.
    requested_priority: ThreadPriority,
    /// The task itself.
    f: Box<dyn FnOnce() + Send + 'static>,
}
|
||||||
|
|
||||||
|
impl Pool {
    /// Creates a pool with `threads` worker threads.
    ///
    /// Workers start at [`ThreadPriority::Worker`] and lazily re-apply the
    /// platform priority only when a job requests a different one than the
    /// previous job ran at.
    pub(crate) fn new(threads: NonZeroUsize) -> Pool {
        // Override OS defaults to avoid stack overflows on platforms with low stack size defaults.
        const STACK_SIZE: usize = 2 * 1024 * 1024;
        const INITIAL_PRIORITY: ThreadPriority = ThreadPriority::Worker;

        let threads = usize::from(threads);

        // Channel buffer capacity is between 2 and 4, depending on the pool size.
        let (job_sender, job_receiver) = crossbeam::channel::bounded(std::cmp::min(threads * 2, 4));
        let extant_tasks = Arc::new(AtomicUsize::new(0));

        let mut handles = Vec::with_capacity(threads);
        for i in 0..threads {
            let handle = Builder::new(INITIAL_PRIORITY)
                .stack_size(STACK_SIZE)
                .name(format!("ruff:worker:{i}"))
                .spawn({
                    let extant_tasks = Arc::clone(&extant_tasks);
                    let job_receiver: Receiver<Job> = job_receiver.clone();
                    move || {
                        let mut current_priority = INITIAL_PRIORITY;
                        // This loop (and thus the worker thread) ends once
                        // every `Sender` is dropped and the channel closes.
                        for job in job_receiver {
                            // Only touch the QoS APIs when the priority
                            // actually changes between consecutive jobs.
                            if job.requested_priority != current_priority {
                                job.requested_priority.apply_to_current_thread();
                                current_priority = job.requested_priority;
                            }
                            // Bracket job execution so `len` reflects
                            // currently-running jobs.
                            extant_tasks.fetch_add(1, Ordering::SeqCst);
                            (job.f)();
                            extant_tasks.fetch_sub(1, Ordering::SeqCst);
                        }
                    }
                })
                .expect("failed to spawn thread");

            handles.push(handle);
        }

        Pool {
            _handles: handles,
            extant_tasks,
            job_sender,
        }
    }

    /// Schedules `f` to run on a worker thread at the given `priority`.
    ///
    /// Blocks if the (small, bounded) job channel is full.
    ///
    /// # Panics
    ///
    /// Panics if the worker threads have shut down and the send fails.
    pub(crate) fn spawn<F>(&self, priority: ThreadPriority, f: F)
    where
        F: FnOnce() + Send + 'static,
    {
        let f = Box::new(move || {
            // In debug builds, verify that the worker really runs the job
            // at the priority the caller requested.
            if cfg!(debug_assertions) {
                priority.assert_is_used_on_current_thread();
            }
            f();
        });

        let job = Job {
            requested_priority: priority,
            f,
        };
        self.job_sender.send(job).unwrap();
    }

    /// Returns the number of jobs currently being executed.
    #[allow(dead_code)]
    pub(super) fn len(&self) -> usize {
        self.extant_tasks.load(Ordering::SeqCst)
    }
}
|
297
crates/red_knot_server/src/server/schedule/thread/priority.rs
Normal file
297
crates/red_knot_server/src/server/schedule/thread/priority.rs
Normal file
|
@ -0,0 +1,297 @@
|
||||||
|
// +------------------------------------------------------------+
|
||||||
|
// | Code adopted from: |
|
||||||
|
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
|
||||||
|
// | File: `crates/stdx/src/thread/intent.rs` |
|
||||||
|
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
|
||||||
|
// +------------------------------------------------------------+
|
||||||
|
//! An opaque façade around platform-specific QoS APIs.
|
||||||
|
|
||||||
|
/// The scheduling priority of a thread, mapped to a platform QoS class
/// where one is available.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
// Please maintain order from least to most priority for the derived `Ord` impl.
pub(crate) enum ThreadPriority {
    /// Any thread which does work that isn't in a critical path.
    Worker,

    /// Any thread which does work caused by the user typing, or
    /// work that the editor may wait on.
    LatencySensitive,
}
|
||||||
|
|
||||||
|
impl ThreadPriority {
|
||||||
|
// These APIs must remain private;
|
||||||
|
// we only want consumers to set thread priority
|
||||||
|
// during thread creation.
|
||||||
|
|
||||||
|
pub(crate) fn apply_to_current_thread(self) {
|
||||||
|
let class = thread_priority_to_qos_class(self);
|
||||||
|
set_current_thread_qos_class(class);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn assert_is_used_on_current_thread(self) {
|
||||||
|
if IS_QOS_AVAILABLE {
|
||||||
|
let class = thread_priority_to_qos_class(self);
|
||||||
|
assert_eq!(get_current_thread_qos_class(), Some(class));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
use imp::QoSClass;

/// Whether the current platform exposes QoS APIs at all
/// (`true` only on Apple platforms; see the `imp` modules below).
const IS_QOS_AVAILABLE: bool = imp::IS_QOS_AVAILABLE;

/// Sets the QoS class of the calling thread (no-op where unsupported).
fn set_current_thread_qos_class(class: QoSClass) {
    imp::set_current_thread_qos_class(class);
}

/// Returns the QoS class of the calling thread, if one has been set.
fn get_current_thread_qos_class() -> Option<QoSClass> {
    imp::get_current_thread_qos_class()
}

/// Maps a cross-platform [`ThreadPriority`] to the platform's QoS class.
fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
    imp::thread_priority_to_qos_class(priority)
}
|
||||||
|
|
||||||
|
// All Apple platforms use XNU as their kernel
// and thus have the concept of QoS.
#[cfg(target_vendor = "apple")]
mod imp {
    use super::ThreadPriority;

    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    // Please maintain order from least to most priority for the derived `Ord` impl.
    pub(super) enum QoSClass {
        // Documentation adapted from https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/include/sys/qos.h#L55
        //
        /// TLDR: invisible maintenance tasks
        ///
        /// Contract:
        ///
        /// * **You do not care about how long it takes for work to finish.**
        /// * **You do not care about work being deferred temporarily.**
        ///   (e.g. if the device's battery is in a critical state)
        ///
        /// Examples:
        ///
        /// * in a video editor:
        ///   creating periodic backups of project files
        /// * in a browser:
        ///   cleaning up cached sites which have not been accessed in a long time
        /// * in a collaborative word processor:
        ///   creating a searchable index of all documents
        ///
        /// Use this QoS class for background tasks
        /// which the user did not initiate themselves
        /// and which are invisible to the user.
        /// It is expected that this work will take significant time to complete:
        /// minutes or even hours.
        ///
        /// This QoS class provides the most energy and thermally-efficient execution possible.
        /// All other work is prioritized over background tasks.
        Background,

        /// TLDR: tasks that don't block using your app
        ///
        /// Contract:
        ///
        /// * **Your app remains useful even as the task is executing.**
        ///
        /// Examples:
        ///
        /// * in a video editor:
        ///   exporting a video to disk -
        ///   the user can still work on the timeline
        /// * in a browser:
        ///   automatically extracting a downloaded zip file -
        ///   the user can still switch tabs
        /// * in a collaborative word processor:
        ///   downloading images embedded in a document -
        ///   the user can still make edits
        ///
        /// Use this QoS class for tasks which
        /// may or may not be initiated by the user,
        /// but whose result is visible.
        /// It is expected that this work will take a few seconds to a few minutes.
        /// Typically your app will include a progress bar
        /// for tasks using this class.
        ///
        /// This QoS class provides a balance between
        /// performance, responsiveness and efficiency.
        Utility,

        /// TLDR: tasks that block using your app
        ///
        /// Contract:
        ///
        /// * **You need this work to complete
        ///   before the user can keep interacting with your app.**
        /// * **Your work will not take more than a few seconds to complete.**
        ///
        /// Examples:
        ///
        /// * in a video editor:
        ///   opening a saved project
        /// * in a browser:
        ///   loading a list of the user's bookmarks and top sites
        ///   when a new tab is created
        /// * in a collaborative word processor:
        ///   running a search on the document's content
        ///
        /// Use this QoS class for tasks which were initiated by the user
        /// and block the usage of your app while they are in progress.
        /// It is expected that this work will take a few seconds or less to complete;
        /// not long enough to cause the user to switch to something else.
        /// Your app will likely indicate progress on these tasks
        /// through the display of placeholder content or modals.
        ///
        /// This QoS class is not energy-efficient.
        /// Rather, it provides responsiveness
        /// by prioritizing work above other tasks on the system
        /// except for critical user-interactive work.
        UserInitiated,

        /// TLDR: render loops and nothing else
        ///
        /// Contract:
        ///
        /// * **You absolutely need this work to complete immediately
        ///   or your app will appear to freeze.**
        /// * **Your work will always complete virtually instantaneously.**
        ///
        /// Examples:
        ///
        /// * the main thread in a GUI application
        /// * the update & render loop in a game
        /// * a secondary thread which progresses an animation
        ///
        /// Use this QoS class for any work which, if delayed,
        /// will make your user interface unresponsive.
        /// It is expected that this work will be virtually instantaneous.
        ///
        /// This QoS class is not energy-efficient.
        /// Specifying this class is a request to run with
        /// nearly all available system CPU and I/O bandwidth even under contention.
        UserInteractive,
    }

    /// QoS is always available on Apple platforms.
    pub(super) const IS_QOS_AVAILABLE: bool = true;

    /// Sets the QoS class of the calling thread.
    ///
    /// # Panics
    ///
    /// Panics if the thread has opted out of QoS (e.g. via a prior call to
    /// `pthread_setschedparam`); see the `EPERM` branch below.
    pub(super) fn set_current_thread_qos_class(class: QoSClass) {
        let c = match class {
            QoSClass::UserInteractive => libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE,
            QoSClass::UserInitiated => libc::qos_class_t::QOS_CLASS_USER_INITIATED,
            QoSClass::Utility => libc::qos_class_t::QOS_CLASS_UTILITY,
            QoSClass::Background => libc::qos_class_t::QOS_CLASS_BACKGROUND,
        };

        #[allow(unsafe_code)]
        // SAFETY: only affects the current thread and is passed a valid
        // `qos_class_t`; failures are reported through the return code.
        let code = unsafe { libc::pthread_set_qos_class_self_np(c, 0) };

        if code == 0 {
            return;
        }

        #[allow(unsafe_code)]
        // SAFETY: `__error()` returns a valid pointer to this thread's errno.
        let errno = unsafe { *libc::__error() };

        match errno {
            libc::EPERM => {
                // This thread has been excluded from the QoS system
                // due to a previous call to a function such as `pthread_setschedparam`
                // which is incompatible with QoS.
                //
                // Panic instead of returning an error
                // to maintain the invariant that we only use QoS APIs.
                panic!("tried to set QoS of thread which has opted out of QoS (os error {errno})")
            }

            libc::EINVAL => {
                // This is returned if we pass something other than a qos_class_t
                // to `pthread_set_qos_class_self_np`.
                //
                // This is impossible, so again panic.
                unreachable!(
                    "invalid qos_class_t value was passed to pthread_set_qos_class_self_np"
                )
            }

            _ => {
                // `pthread_set_qos_class_self_np`’s documentation
                // does not mention any other errors.
                unreachable!("`pthread_set_qos_class_self_np` returned unexpected error {errno}")
            }
        }
    }

    /// Returns the QoS class of the calling thread,
    /// or `None` if QoS has never been set on it.
    ///
    /// # Panics
    ///
    /// Panics if the thread has opted out of QoS (the `QOS_CLASS_UNSPECIFIED`
    /// branch below).
    pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
        #[allow(unsafe_code)]
        // SAFETY: `pthread_self()` is always safe to call.
        let current_thread = unsafe { libc::pthread_self() };
        let mut qos_class_raw = libc::qos_class_t::QOS_CLASS_UNSPECIFIED;
        #[allow(unsafe_code)]
        // SAFETY: `qos_class_raw` is a valid out-pointer for the duration of
        // the call, and the relative-priority out-parameter may be null.
        let code = unsafe {
            libc::pthread_get_qos_class_np(current_thread, &mut qos_class_raw, std::ptr::null_mut())
        };

        if code != 0 {
            // `pthread_get_qos_class_np`’s documentation states that
            // an error value is placed into errno if the return code is not zero.
            // However, it never states what errors are possible.
            // Inspecting the source[0] shows that, as of this writing, it always returns zero.
            //
            // Whatever errors the function could report in future are likely to be
            // ones which we cannot handle anyway
            //
            // 0: https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/src/qos.c#L171-L177
            #[allow(unsafe_code)]
            // SAFETY: `__error()` returns a valid pointer to this thread's errno.
            let errno = unsafe { *libc::__error() };
            unreachable!("`pthread_get_qos_class_np` failed unexpectedly (os error {errno})");
        }

        match qos_class_raw {
            libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE => Some(QoSClass::UserInteractive),
            libc::qos_class_t::QOS_CLASS_USER_INITIATED => Some(QoSClass::UserInitiated),
            libc::qos_class_t::QOS_CLASS_DEFAULT => None, // QoS has never been set
            libc::qos_class_t::QOS_CLASS_UTILITY => Some(QoSClass::Utility),
            libc::qos_class_t::QOS_CLASS_BACKGROUND => Some(QoSClass::Background),

            libc::qos_class_t::QOS_CLASS_UNSPECIFIED => {
                // Using manual scheduling APIs causes threads to “opt out” of QoS.
                // At this point they become incompatible with QoS,
                // and as such have the “unspecified” QoS class.
                //
                // Panic instead of returning an error
                // to maintain the invariant that we only use QoS APIs.
                panic!("tried to get QoS of thread which has opted out of QoS")
            }
        }
    }

    /// Maps the cross-platform [`ThreadPriority`] to a concrete QoS class.
    pub(super) fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
        match priority {
            ThreadPriority::Worker => QoSClass::Utility,
            ThreadPriority::LatencySensitive => QoSClass::UserInitiated,
        }
    }
}
|
||||||
|
|
||||||
|
// FIXME: Windows has QoS APIs, we should use them!
// Fallback for every non-Apple platform: QoS is reported as unavailable
// and all operations are no-ops.
#[cfg(not(target_vendor = "apple"))]
mod imp {
    use super::ThreadPriority;

    /// Stub QoS class for platforms without a QoS concept.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub(super) enum QoSClass {
        Default,
    }

    /// QoS is unavailable here; callers skip priority assertions.
    pub(super) const IS_QOS_AVAILABLE: bool = false;

    /// No-op: thread priorities cannot be applied on this platform.
    pub(super) fn set_current_thread_qos_class(_: QoSClass) {}

    /// Always `None`: there is no QoS state to query on this platform.
    pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
        None
    }

    /// Every priority collapses to the single stub class.
    pub(super) fn thread_priority_to_qos_class(_: ThreadPriority) -> QoSClass {
        QoSClass::Default
    }
}
|
257
crates/red_knot_server/src/session.rs
Normal file
257
crates/red_knot_server/src/session.rs
Normal file
|
@ -0,0 +1,257 @@
|
||||||
|
//! Data model, state management, and configuration resolution.
|
||||||
|
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::ops::{Deref, DerefMut};
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use anyhow::anyhow;
|
||||||
|
use lsp_types::{ClientCapabilities, Url};
|
||||||
|
|
||||||
|
use red_knot_workspace::db::RootDatabase;
|
||||||
|
use red_knot_workspace::workspace::WorkspaceMetadata;
|
||||||
|
use ruff_db::files::{system_path_to_file, File};
|
||||||
|
use ruff_db::program::{ProgramSettings, SearchPathSettings, TargetVersion};
|
||||||
|
use ruff_db::system::SystemPath;
|
||||||
|
use ruff_db::Db as _;
|
||||||
|
|
||||||
|
use crate::edit::{DocumentKey, NotebookDocument};
|
||||||
|
use crate::system::{url_to_system_path, LSPSystem};
|
||||||
|
use crate::{PositionEncoding, TextDocument};
|
||||||
|
|
||||||
|
pub(crate) use self::capabilities::ResolvedClientCapabilities;
|
||||||
|
pub use self::index::DocumentQuery;
|
||||||
|
pub(crate) use self::settings::AllSettings;
|
||||||
|
pub use self::settings::ClientSettings;
|
||||||
|
|
||||||
|
mod capabilities;
|
||||||
|
pub(crate) mod index;
|
||||||
|
mod settings;
|
||||||
|
|
||||||
|
// TODO(dhruvmanila): In general, the server shouldn't use any salsa queries directly and instead
|
||||||
|
// should use methods on `RootDatabase`.
|
||||||
|
|
||||||
|
/// The global state for the LSP server: open documents, one Red Knot
/// database per workspace, and the negotiated client capabilities.
pub struct Session {
    /// Used to retrieve information about open documents and settings.
    ///
    /// This will be [`None`] when a mutable reference is held to the index via [`index_mut`]
    /// to prevent the index from being accessed while it is being modified. It will be restored
    /// when the mutable reference ([`MutIndexGuard`]) is dropped.
    ///
    /// [`index_mut`]: Session::index_mut
    index: Option<Arc<index::Index>>,

    /// Maps workspace root paths to their respective databases.
    workspaces: BTreeMap<PathBuf, salsa::Handle<RootDatabase>>,
    /// The global position encoding, negotiated during LSP initialization.
    position_encoding: PositionEncoding,
    /// Tracks what LSP features the client supports and doesn't support.
    resolved_client_capabilities: Arc<ResolvedClientCapabilities>,
}
|
||||||
|
|
||||||
|
impl Session {
    /// Creates a new session, constructing one Red Knot database per
    /// workspace folder. Every database is backed by an [`LSPSystem`]
    /// sharing the same document [`index::Index`].
    ///
    /// # Errors
    ///
    /// Returns an error if a workspace URL is not a file path, if the path
    /// is not valid UTF-8, or if workspace metadata discovery fails.
    pub fn new(
        client_capabilities: &ClientCapabilities,
        position_encoding: PositionEncoding,
        global_settings: ClientSettings,
        workspace_folders: &[(Url, ClientSettings)],
    ) -> crate::Result<Self> {
        let mut workspaces = BTreeMap::new();
        let index = Arc::new(index::Index::new(global_settings));

        for (url, _) in workspace_folders {
            let path = url
                .to_file_path()
                .map_err(|()| anyhow!("Workspace URL is not a file or directory: {:?}", url))?;
            let system_path = SystemPath::from_std_path(&path)
                .ok_or_else(|| anyhow!("Workspace path is not a valid UTF-8 path: {:?}", path))?;
            // Each database gets its own `LSPSystem` holding a clone of the
            // shared index, so open-document state is visible everywhere.
            let system = LSPSystem::new(index.clone());

            let metadata = WorkspaceMetadata::from_path(system_path, &system)?;
            // TODO(dhruvmanila): Get the values from the client settings
            let program_settings = ProgramSettings {
                target_version: TargetVersion::default(),
                search_paths: SearchPathSettings {
                    extra_paths: vec![],
                    src_root: system_path.to_path_buf(),
                    site_packages: vec![],
                    custom_typeshed: None,
                },
            };
            workspaces.insert(
                path,
                salsa::Handle::new(RootDatabase::new(metadata, program_settings, system)),
            );
        }

        Ok(Self {
            position_encoding,
            workspaces,
            index: Some(index),
            resolved_client_capabilities: Arc::new(ResolvedClientCapabilities::new(
                client_capabilities,
            )),
        })
    }

    /// Returns the database of the workspace whose root is the greatest
    /// key sorting `<= path` in the ordered map.
    ///
    /// NOTE(review): this does not verify that `path` actually lies inside
    /// the matched root — confirm callers only pass paths that belong to
    /// some workspace.
    pub(crate) fn workspace_db_for_path(
        &self,
        path: impl AsRef<Path>,
    ) -> Option<&salsa::Handle<RootDatabase>> {
        self.workspaces
            .range(..=path.as_ref().to_path_buf())
            .next_back()
            .map(|(_, db)| db)
    }

    /// Mutable variant of [`Session::workspace_db_for_path`]; the same
    /// nearest-lesser-root lookup applies.
    pub(crate) fn workspace_db_for_path_mut(
        &mut self,
        path: impl AsRef<Path>,
    ) -> Option<&mut salsa::Handle<RootDatabase>> {
        self.workspaces
            .range_mut(..=path.as_ref().to_path_buf())
            .next_back()
            .map(|(_, db)| db)
    }

    /// Resolves a [`DocumentKey`] (text document or notebook cell) from a URL.
    pub fn key_from_url(&self, url: Url) -> DocumentKey {
        self.index().key_from_url(url)
    }

    /// Creates a document snapshot with the URL referencing the document to snapshot.
    /// Returns `None` when the URL does not refer to an open document.
    pub fn take_snapshot(&self, url: Url) -> Option<DocumentSnapshot> {
        let key = self.key_from_url(url);
        Some(DocumentSnapshot {
            resolved_client_capabilities: self.resolved_client_capabilities.clone(),
            document_ref: self.index().make_document_ref(key)?,
            position_encoding: self.position_encoding,
        })
    }

    /// Registers a notebook document at the provided `url`.
    /// If a document is already open here, it will be overwritten.
    pub fn open_notebook_document(&mut self, url: Url, document: NotebookDocument) {
        self.index_mut().open_notebook_document(url, document);
    }

    /// Registers a text document at the provided `url`.
    /// If a document is already open here, it will be overwritten.
    pub(crate) fn open_text_document(&mut self, url: Url, document: TextDocument) {
        self.index_mut().open_text_document(url, document);
    }

    /// De-registers a document, specified by its key.
    /// Calling this multiple times for the same document is a logic error.
    pub(crate) fn close_document(&mut self, key: &DocumentKey) -> crate::Result<()> {
        self.index_mut().close_document(key)?;
        Ok(())
    }

    /// Returns a reference to the index.
    ///
    /// # Panics
    ///
    /// Panics if there's a mutable reference to the index via [`index_mut`].
    ///
    /// [`index_mut`]: Session::index_mut
    fn index(&self) -> &index::Index {
        self.index.as_ref().unwrap()
    }

    /// Returns a mutable reference to the index.
    ///
    /// This method drops all references to the index and returns a guard that will restore the
    /// references when dropped. This guard holds the only reference to the index and allows
    /// modifying it.
    fn index_mut(&mut self) -> MutIndexGuard {
        let index = self.index.take().unwrap();

        for db in self.workspaces.values_mut() {
            // Calling `get_mut` on `Handle<Database>` cancels all pending queries and waits for them to stop.
            let db = db.get_mut();

            // Remove the `index` from each database. This drops the count of `Arc<Index>` down to 1
            db.system_mut()
                .as_any_mut()
                .downcast_mut::<LSPSystem>()
                .unwrap()
                .take_index();
        }

        // There should now be exactly one reference to index which is self.index.
        // NOTE(review): if some other `Arc<Index>` clone (e.g. a snapshot) is
        // still alive, `into_inner` yields `None` and the guard panics on
        // first deref — confirm no snapshot can outlive this call.
        let index = Arc::into_inner(index);

        MutIndexGuard {
            session: self,
            index,
        }
    }
}
|
||||||
|
|
||||||
|
/// A guard that holds the only reference to the index and allows modifying it.
///
/// When dropped, this guard restores all references to the index.
struct MutIndexGuard<'a> {
    /// The session whose index was taken; references are restored here on drop.
    session: &'a mut Session,
    /// The sole copy of the index while the guard is alive.
    /// NOTE(review): `None` if `Arc::into_inner` failed in
    /// `Session::index_mut`; the `Deref` impls then panic.
    index: Option<index::Index>,
}
|
||||||
|
|
||||||
|
impl Deref for MutIndexGuard<'_> {
|
||||||
|
type Target = index::Index;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
self.index.as_ref().unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DerefMut for MutIndexGuard<'_> {
|
||||||
|
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||||
|
self.index.as_mut().unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for MutIndexGuard<'_> {
    fn drop(&mut self) {
        // Undo `Session::index_mut`: re-wrap the index in an `Arc` and hand a
        // clone back to every workspace database's `LSPSystem`, then restore
        // the session's own reference.
        if let Some(index) = self.index.take() {
            let index = Arc::new(index);
            for db in self.session.workspaces.values_mut() {
                // `get_mut` cancels pending queries before granting mutable
                // access to the database.
                let db = db.get_mut();
                db.system_mut()
                    .as_any_mut()
                    .downcast_mut::<LSPSystem>()
                    .unwrap()
                    .set_index(index.clone());
            }

            self.session.index = Some(index);
        }
    }
}
|
||||||
|
|
||||||
|
/// An immutable snapshot of `Session` that references
/// a specific document.
pub struct DocumentSnapshot {
    /// Client capabilities resolved during initialization.
    resolved_client_capabilities: Arc<ResolvedClientCapabilities>,
    /// The document (or notebook cell) this snapshot refers to.
    document_ref: index::DocumentQuery,
    /// The negotiated position encoding.
    position_encoding: PositionEncoding,
}
|
||||||
|
|
||||||
|
impl DocumentSnapshot {
|
||||||
|
pub(crate) fn resolved_client_capabilities(&self) -> &ResolvedClientCapabilities {
|
||||||
|
&self.resolved_client_capabilities
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn query(&self) -> &index::DocumentQuery {
|
||||||
|
&self.document_ref
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn encoding(&self) -> PositionEncoding {
|
||||||
|
self.position_encoding
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn file(&self, db: &RootDatabase) -> Option<File> {
|
||||||
|
let path = url_to_system_path(self.document_ref.file_url()).ok()?;
|
||||||
|
system_path_to_file(db, path).ok()
|
||||||
|
}
|
||||||
|
}
|
85
crates/red_knot_server/src/session/capabilities.rs
Normal file
85
crates/red_knot_server/src/session/capabilities.rs
Normal file
|
@ -0,0 +1,85 @@
|
||||||
|
use lsp_types::ClientCapabilities;
|
||||||
|
use ruff_linter::display_settings;
|
||||||
|
|
||||||
|
/// The subset of LSP client capabilities the server consults,
/// resolved down to plain booleans.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[allow(clippy::struct_excessive_bools)]
pub(crate) struct ResolvedClientCapabilities {
    /// Client supports code-action `data` plus deferred `edit` resolution.
    pub(crate) code_action_deferred_edit_resolution: bool,
    /// Client supports `workspace/applyEdit`.
    pub(crate) apply_edit: bool,
    /// Client supports `documentChanges` in workspace edits.
    pub(crate) document_changes: bool,
    /// Whether to assume `workspace/diagnostic/refresh` support
    /// (currently hard-coded to `true` in `new`; see the TODO there).
    pub(crate) workspace_refresh: bool,
    /// Client supports pull-based (`textDocument/diagnostic`) diagnostics.
    pub(crate) pull_diagnostics: bool,
}
|
||||||
|
|
||||||
|
impl ResolvedClientCapabilities {
|
||||||
|
pub(super) fn new(client_capabilities: &ClientCapabilities) -> Self {
|
||||||
|
let code_action_settings = client_capabilities
|
||||||
|
.text_document
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|doc_settings| doc_settings.code_action.as_ref());
|
||||||
|
let code_action_data_support = code_action_settings
|
||||||
|
.and_then(|code_action_settings| code_action_settings.data_support)
|
||||||
|
.unwrap_or_default();
|
||||||
|
let code_action_edit_resolution = code_action_settings
|
||||||
|
.and_then(|code_action_settings| code_action_settings.resolve_support.as_ref())
|
||||||
|
.is_some_and(|resolve_support| resolve_support.properties.contains(&"edit".into()));
|
||||||
|
|
||||||
|
let apply_edit = client_capabilities
|
||||||
|
.workspace
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|workspace| workspace.apply_edit)
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let document_changes = client_capabilities
|
||||||
|
.workspace
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|workspace| workspace.workspace_edit.as_ref())
|
||||||
|
.and_then(|workspace_edit| workspace_edit.document_changes)
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let workspace_refresh = true;
|
||||||
|
|
||||||
|
// TODO(jane): Once the bug involving workspace.diagnostic(s) deserialization has been fixed,
|
||||||
|
// uncomment this.
|
||||||
|
/*
|
||||||
|
let workspace_refresh = client_capabilities
|
||||||
|
.workspace
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|workspace| workspace.diagnostic.as_ref())
|
||||||
|
.and_then(|diagnostic| diagnostic.refresh_support)
|
||||||
|
.unwrap_or_default();
|
||||||
|
*/
|
||||||
|
|
||||||
|
let pull_diagnostics = client_capabilities
|
||||||
|
.text_document
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|text_document| text_document.diagnostic.as_ref())
|
||||||
|
.is_some();
|
||||||
|
|
||||||
|
Self {
|
||||||
|
code_action_deferred_edit_resolution: code_action_data_support
|
||||||
|
&& code_action_edit_resolution,
|
||||||
|
apply_edit,
|
||||||
|
document_changes,
|
||||||
|
workspace_refresh,
|
||||||
|
pull_diagnostics,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for ResolvedClientCapabilities {
    /// Renders the resolved capabilities via the `display_settings!` macro
    /// under the `capabilities` namespace, for uniform logging output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        display_settings! {
            formatter = f,
            namespace = "capabilities",
            fields = [
                self.code_action_deferred_edit_resolution,
                self.apply_edit,
                self.document_changes,
                self.workspace_refresh,
                self.pull_diagnostics,
            ]
        };
        Ok(())
    }
}
|
357
crates/red_knot_server/src/session/index.rs
Normal file
357
crates/red_knot_server/src/session/index.rs
Normal file
|
@ -0,0 +1,357 @@
|
||||||
|
use std::borrow::Cow;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use lsp_types::Url;
|
||||||
|
use rustc_hash::FxHashMap;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
edit::{DocumentKey, DocumentVersion, NotebookDocument},
|
||||||
|
PositionEncoding, TextDocument,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::ClientSettings;
|
||||||
|
|
||||||
|
/// Stores and tracks all open documents in a session, along with their associated settings.
|
||||||
|
#[derive(Default, Debug)]
|
||||||
|
pub(crate) struct Index {
|
||||||
|
/// Maps all document file URLs to the associated document controller
|
||||||
|
documents: FxHashMap<Url, DocumentController>,
|
||||||
|
|
||||||
|
/// Maps opaque cell URLs to a notebook URL (document)
|
||||||
|
notebook_cells: FxHashMap<Url, Url>,
|
||||||
|
|
||||||
|
/// Global settings provided by the client.
|
||||||
|
global_settings: ClientSettings,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Index {
|
||||||
|
pub(super) fn new(global_settings: ClientSettings) -> Self {
|
||||||
|
Self {
|
||||||
|
documents: FxHashMap::default(),
|
||||||
|
notebook_cells: FxHashMap::default(),
|
||||||
|
global_settings,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn text_document_urls(&self) -> impl Iterator<Item = &Url> + '_ {
|
||||||
|
self.documents
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(url, doc)| doc.as_text().and(Some(url)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn notebook_document_urls(&self) -> impl Iterator<Item = &Url> + '_ {
|
||||||
|
self.documents
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, doc)| doc.as_notebook().is_some())
|
||||||
|
.map(|(url, _)| url)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn update_text_document(
|
||||||
|
&mut self,
|
||||||
|
key: &DocumentKey,
|
||||||
|
content_changes: Vec<lsp_types::TextDocumentContentChangeEvent>,
|
||||||
|
new_version: DocumentVersion,
|
||||||
|
encoding: PositionEncoding,
|
||||||
|
) -> crate::Result<()> {
|
||||||
|
let controller = self.document_controller_for_key(key)?;
|
||||||
|
let Some(document) = controller.as_text_mut() else {
|
||||||
|
anyhow::bail!("Text document URI does not point to a text document");
|
||||||
|
};
|
||||||
|
|
||||||
|
if content_changes.is_empty() {
|
||||||
|
document.update_version(new_version);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
document.apply_changes(content_changes, new_version, encoding);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn key_from_url(&self, url: Url) -> DocumentKey {
|
||||||
|
if self.notebook_cells.contains_key(&url) {
|
||||||
|
DocumentKey::NotebookCell(url)
|
||||||
|
} else if Path::new(url.path())
|
||||||
|
.extension()
|
||||||
|
.map_or(false, |ext| ext.eq_ignore_ascii_case("ipynb"))
|
||||||
|
{
|
||||||
|
DocumentKey::Notebook(url)
|
||||||
|
} else {
|
||||||
|
DocumentKey::Text(url)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn update_notebook_document(
|
||||||
|
&mut self,
|
||||||
|
key: &DocumentKey,
|
||||||
|
cells: Option<lsp_types::NotebookDocumentCellChange>,
|
||||||
|
metadata: Option<serde_json::Map<String, serde_json::Value>>,
|
||||||
|
new_version: DocumentVersion,
|
||||||
|
encoding: PositionEncoding,
|
||||||
|
) -> crate::Result<()> {
|
||||||
|
// update notebook cell index
|
||||||
|
if let Some(lsp_types::NotebookDocumentCellChangeStructure {
|
||||||
|
did_open: Some(did_open),
|
||||||
|
..
|
||||||
|
}) = cells.as_ref().and_then(|cells| cells.structure.as_ref())
|
||||||
|
{
|
||||||
|
let Some(path) = self.url_for_key(key).cloned() else {
|
||||||
|
anyhow::bail!("Tried to open unavailable document `{key}`");
|
||||||
|
};
|
||||||
|
|
||||||
|
for opened_cell in did_open {
|
||||||
|
self.notebook_cells
|
||||||
|
.insert(opened_cell.uri.clone(), path.clone());
|
||||||
|
}
|
||||||
|
// deleted notebook cells are closed via textDocument/didClose - we don't close them here.
|
||||||
|
}
|
||||||
|
|
||||||
|
let controller = self.document_controller_for_key(key)?;
|
||||||
|
let Some(notebook) = controller.as_notebook_mut() else {
|
||||||
|
anyhow::bail!("Notebook document URI does not point to a notebook document");
|
||||||
|
};
|
||||||
|
|
||||||
|
notebook.update(cells, metadata, new_version, encoding)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn num_documents(&self) -> usize {
|
||||||
|
self.documents.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn make_document_ref(&self, key: DocumentKey) -> Option<DocumentQuery> {
|
||||||
|
let url = self.url_for_key(&key)?.clone();
|
||||||
|
let controller = self.documents.get(&url)?;
|
||||||
|
let cell_url = match key {
|
||||||
|
DocumentKey::NotebookCell(cell_url) => Some(cell_url),
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
Some(controller.make_ref(cell_url, url))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn open_text_document(&mut self, url: Url, document: TextDocument) {
|
||||||
|
self.documents
|
||||||
|
.insert(url, DocumentController::new_text(document));
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn open_notebook_document(&mut self, notebook_url: Url, document: NotebookDocument) {
|
||||||
|
for cell_url in document.urls() {
|
||||||
|
self.notebook_cells
|
||||||
|
.insert(cell_url.clone(), notebook_url.clone());
|
||||||
|
}
|
||||||
|
self.documents
|
||||||
|
.insert(notebook_url, DocumentController::new_notebook(document));
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn close_document(&mut self, key: &DocumentKey) -> crate::Result<()> {
|
||||||
|
// Notebook cells URIs are removed from the index here, instead of during
|
||||||
|
// `update_notebook_document`. This is because a notebook cell, as a text document,
|
||||||
|
// is requested to be `closed` by VS Code after the notebook gets updated.
|
||||||
|
// This is not documented in the LSP specification explicitly, and this assumption
|
||||||
|
// may need revisiting in the future as we support more editors with notebook support.
|
||||||
|
if let DocumentKey::NotebookCell(uri) = key {
|
||||||
|
if self.notebook_cells.remove(uri).is_none() {
|
||||||
|
tracing::warn!("Tried to remove a notebook cell that does not exist: {uri}",);
|
||||||
|
}
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
let Some(url) = self.url_for_key(key).cloned() else {
|
||||||
|
anyhow::bail!("Tried to close unavailable document `{key}`");
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some(_) = self.documents.remove(&url) else {
|
||||||
|
anyhow::bail!("tried to close document that didn't exist at {}", url)
|
||||||
|
};
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn document_controller_for_key(
|
||||||
|
&mut self,
|
||||||
|
key: &DocumentKey,
|
||||||
|
) -> crate::Result<&mut DocumentController> {
|
||||||
|
let Some(url) = self.url_for_key(key).cloned() else {
|
||||||
|
anyhow::bail!("Tried to open unavailable document `{key}`");
|
||||||
|
};
|
||||||
|
let Some(controller) = self.documents.get_mut(&url) else {
|
||||||
|
anyhow::bail!("Document controller not available at `{}`", url);
|
||||||
|
};
|
||||||
|
Ok(controller)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn url_for_key<'a>(&'a self, key: &'a DocumentKey) -> Option<&'a Url> {
|
||||||
|
match key {
|
||||||
|
DocumentKey::Notebook(path) | DocumentKey::Text(path) => Some(path),
|
||||||
|
DocumentKey::NotebookCell(uri) => self.notebook_cells.get(uri),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A mutable handler to an underlying document.
|
||||||
|
#[derive(Debug)]
|
||||||
|
enum DocumentController {
|
||||||
|
Text(Arc<TextDocument>),
|
||||||
|
Notebook(Arc<NotebookDocument>),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DocumentController {
|
||||||
|
fn new_text(document: TextDocument) -> Self {
|
||||||
|
Self::Text(Arc::new(document))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_notebook(document: NotebookDocument) -> Self {
|
||||||
|
Self::Notebook(Arc::new(document))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_ref(&self, cell_url: Option<Url>, file_url: Url) -> DocumentQuery {
|
||||||
|
match &self {
|
||||||
|
Self::Notebook(notebook) => DocumentQuery::Notebook {
|
||||||
|
cell_url,
|
||||||
|
file_url,
|
||||||
|
notebook: notebook.clone(),
|
||||||
|
},
|
||||||
|
Self::Text(document) => DocumentQuery::Text {
|
||||||
|
file_url,
|
||||||
|
document: document.clone(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn as_notebook_mut(&mut self) -> Option<&mut NotebookDocument> {
|
||||||
|
Some(match self {
|
||||||
|
Self::Notebook(notebook) => Arc::make_mut(notebook),
|
||||||
|
Self::Text(_) => return None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn as_notebook(&self) -> Option<&NotebookDocument> {
|
||||||
|
match self {
|
||||||
|
Self::Notebook(notebook) => Some(notebook),
|
||||||
|
Self::Text(_) => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub(crate) fn as_text(&self) -> Option<&TextDocument> {
|
||||||
|
match self {
|
||||||
|
Self::Text(document) => Some(document),
|
||||||
|
Self::Notebook(_) => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn as_text_mut(&mut self) -> Option<&mut TextDocument> {
|
||||||
|
Some(match self {
|
||||||
|
Self::Text(document) => Arc::make_mut(document),
|
||||||
|
Self::Notebook(_) => return None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A read-only query to an open document.
|
||||||
|
/// This query can 'select' a text document, full notebook, or a specific notebook cell.
|
||||||
|
/// It also includes document settings.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub enum DocumentQuery {
|
||||||
|
Text {
|
||||||
|
file_url: Url,
|
||||||
|
document: Arc<TextDocument>,
|
||||||
|
},
|
||||||
|
Notebook {
|
||||||
|
/// The selected notebook cell, if it exists.
|
||||||
|
cell_url: Option<Url>,
|
||||||
|
/// The URL of the notebook.
|
||||||
|
file_url: Url,
|
||||||
|
notebook: Arc<NotebookDocument>,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DocumentQuery {
|
||||||
|
/// Retrieve the original key that describes this document query.
|
||||||
|
pub(crate) fn make_key(&self) -> DocumentKey {
|
||||||
|
match self {
|
||||||
|
Self::Text { file_url, .. } => DocumentKey::Text(file_url.clone()),
|
||||||
|
Self::Notebook {
|
||||||
|
cell_url: Some(cell_uri),
|
||||||
|
..
|
||||||
|
} => DocumentKey::NotebookCell(cell_uri.clone()),
|
||||||
|
Self::Notebook { file_url, .. } => DocumentKey::Notebook(file_url.clone()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generate a source kind used by the linter.
|
||||||
|
pub(crate) fn make_source_kind(&self) -> ruff_linter::source_kind::SourceKind {
|
||||||
|
match self {
|
||||||
|
Self::Text { document, .. } => {
|
||||||
|
ruff_linter::source_kind::SourceKind::Python(document.contents().to_string())
|
||||||
|
}
|
||||||
|
Self::Notebook { notebook, .. } => {
|
||||||
|
ruff_linter::source_kind::SourceKind::IpyNotebook(notebook.make_ruff_notebook())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempts to access the underlying notebook document that this query is selecting.
|
||||||
|
pub fn as_notebook(&self) -> Option<&NotebookDocument> {
|
||||||
|
match self {
|
||||||
|
Self::Notebook { notebook, .. } => Some(notebook),
|
||||||
|
Self::Text { .. } => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the source type of the document associated with this query.
|
||||||
|
pub(crate) fn source_type(&self) -> ruff_python_ast::PySourceType {
|
||||||
|
match self {
|
||||||
|
Self::Text { .. } => ruff_python_ast::PySourceType::from(self.virtual_file_path()),
|
||||||
|
Self::Notebook { .. } => ruff_python_ast::PySourceType::Ipynb,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the version of document selected by this query.
|
||||||
|
pub(crate) fn version(&self) -> DocumentVersion {
|
||||||
|
match self {
|
||||||
|
Self::Text { document, .. } => document.version(),
|
||||||
|
Self::Notebook { notebook, .. } => notebook.version(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the URL for the document selected by this query.
|
||||||
|
pub(crate) fn file_url(&self) -> &Url {
|
||||||
|
match self {
|
||||||
|
Self::Text { file_url, .. } | Self::Notebook { file_url, .. } => file_url,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the path for the document selected by this query.
|
||||||
|
///
|
||||||
|
/// Returns `None` if this is an unsaved (untitled) document.
|
||||||
|
///
|
||||||
|
/// The path isn't guaranteed to point to a real path on the filesystem. This is the case
|
||||||
|
/// for unsaved (untitled) documents.
|
||||||
|
pub(crate) fn file_path(&self) -> Option<PathBuf> {
|
||||||
|
self.file_url().to_file_path().ok()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the path for the document selected by this query, ignoring whether the file exists on disk.
|
||||||
|
///
|
||||||
|
/// Returns the URL's path if this is an unsaved (untitled) document.
|
||||||
|
pub(crate) fn virtual_file_path(&self) -> Cow<Path> {
|
||||||
|
self.file_path()
|
||||||
|
.map(Cow::Owned)
|
||||||
|
.unwrap_or_else(|| Cow::Borrowed(Path::new(self.file_url().path())))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempt to access the single inner text document selected by the query.
|
||||||
|
/// If this query is selecting an entire notebook document, this will return `None`.
|
||||||
|
pub(crate) fn as_single_document(&self) -> Option<&TextDocument> {
|
||||||
|
match self {
|
||||||
|
Self::Text { document, .. } => Some(document),
|
||||||
|
Self::Notebook {
|
||||||
|
notebook,
|
||||||
|
cell_url: cell_uri,
|
||||||
|
..
|
||||||
|
} => cell_uri
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|cell_uri| notebook.cell_document_by_uri(cell_uri)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
111
crates/red_knot_server/src/session/settings.rs
Normal file
111
crates/red_knot_server/src/session/settings.rs
Normal file
|
@ -0,0 +1,111 @@
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use lsp_types::Url;
|
||||||
|
use rustc_hash::FxHashMap;
|
||||||
|
use serde::Deserialize;
|
||||||
|
|
||||||
|
/// Maps a workspace URI to its associated client settings. Used during server initialization.
|
||||||
|
pub(crate) type WorkspaceSettingsMap = FxHashMap<Url, ClientSettings>;
|
||||||
|
|
||||||
|
/// This is a direct representation of the settings schema sent by the client.
|
||||||
|
#[derive(Debug, Deserialize, Default)]
|
||||||
|
#[cfg_attr(test, derive(PartialEq, Eq))]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub struct ClientSettings {
|
||||||
|
// These settings are only needed for tracing, and are only read from the global configuration.
|
||||||
|
// These will not be in the resolved settings.
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub(crate) tracing: TracingSettings,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Settings needed to initialize tracing. These will only be
|
||||||
|
/// read from the global configuration.
|
||||||
|
#[derive(Debug, Deserialize, Default)]
|
||||||
|
#[cfg_attr(test, derive(PartialEq, Eq))]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub(crate) struct TracingSettings {
|
||||||
|
pub(crate) log_level: Option<crate::trace::LogLevel>,
|
||||||
|
/// Path to the log file - tildes and environment variables are supported.
|
||||||
|
pub(crate) log_file: Option<PathBuf>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This is a direct representation of the workspace settings schema,
|
||||||
|
/// which inherits the schema of [`ClientSettings`] and adds extra fields
|
||||||
|
/// to describe the workspace it applies to.
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
#[cfg_attr(test, derive(PartialEq, Eq))]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
struct WorkspaceSettings {
|
||||||
|
#[serde(flatten)]
|
||||||
|
settings: ClientSettings,
|
||||||
|
workspace: Url,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This is the exact schema for initialization options sent in by the client
|
||||||
|
/// during initialization.
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
#[cfg_attr(test, derive(PartialEq, Eq))]
|
||||||
|
#[serde(untagged)]
|
||||||
|
enum InitializationOptions {
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
HasWorkspaces {
|
||||||
|
global_settings: ClientSettings,
|
||||||
|
#[serde(rename = "settings")]
|
||||||
|
workspace_settings: Vec<WorkspaceSettings>,
|
||||||
|
},
|
||||||
|
GlobalOnly {
|
||||||
|
#[serde(default)]
|
||||||
|
settings: ClientSettings,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Built from the initialization options provided by the client.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub(crate) struct AllSettings {
|
||||||
|
pub(crate) global_settings: ClientSettings,
|
||||||
|
/// If this is `None`, the client only passed in global settings.
|
||||||
|
pub(crate) workspace_settings: Option<WorkspaceSettingsMap>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AllSettings {
|
||||||
|
/// Initializes the controller from the serialized initialization options.
|
||||||
|
/// This fails if `options` are not valid initialization options.
|
||||||
|
pub(crate) fn from_value(options: serde_json::Value) -> Self {
|
||||||
|
Self::from_init_options(
|
||||||
|
serde_json::from_value(options)
|
||||||
|
.map_err(|err| {
|
||||||
|
tracing::error!("Failed to deserialize initialization options: {err}. Falling back to default client settings...");
|
||||||
|
show_err_msg!("Ruff received invalid client settings - falling back to default client settings.");
|
||||||
|
})
|
||||||
|
.unwrap_or_default(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn from_init_options(options: InitializationOptions) -> Self {
|
||||||
|
let (global_settings, workspace_settings) = match options {
|
||||||
|
InitializationOptions::GlobalOnly { settings } => (settings, None),
|
||||||
|
InitializationOptions::HasWorkspaces {
|
||||||
|
global_settings,
|
||||||
|
workspace_settings,
|
||||||
|
} => (global_settings, Some(workspace_settings)),
|
||||||
|
};
|
||||||
|
|
||||||
|
Self {
|
||||||
|
global_settings,
|
||||||
|
workspace_settings: workspace_settings.map(|workspace_settings| {
|
||||||
|
workspace_settings
|
||||||
|
.into_iter()
|
||||||
|
.map(|settings| (settings.workspace, settings.settings))
|
||||||
|
.collect()
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for InitializationOptions {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::GlobalOnly {
|
||||||
|
settings: ClientSettings::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
230
crates/red_knot_server/src/system.rs
Normal file
230
crates/red_knot_server/src/system.rs
Normal file
|
@ -0,0 +1,230 @@
|
||||||
|
use std::any::Any;
|
||||||
|
use std::fmt::Display;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use lsp_types::Url;
|
||||||
|
|
||||||
|
use ruff_db::file_revision::FileRevision;
|
||||||
|
use ruff_db::system::walk_directory::WalkDirectoryBuilder;
|
||||||
|
use ruff_db::system::{
|
||||||
|
DirectoryEntry, FileType, Metadata, OsSystem, Result, System, SystemPath, SystemPathBuf,
|
||||||
|
SystemVirtualPath,
|
||||||
|
};
|
||||||
|
use ruff_notebook::{Notebook, NotebookError};
|
||||||
|
|
||||||
|
use crate::session::index::Index;
|
||||||
|
use crate::DocumentQuery;
|
||||||
|
|
||||||
|
/// Converts the given [`Url`] to a [`SystemPathBuf`].
|
||||||
|
///
|
||||||
|
/// This fails in the following cases:
|
||||||
|
/// * The URL scheme is not `file`.
|
||||||
|
/// * The URL cannot be converted to a file path (refer to [`Url::to_file_path`]).
|
||||||
|
/// * If the URL is not a valid UTF-8 string.
|
||||||
|
pub(crate) fn url_to_system_path(url: &Url) -> std::result::Result<SystemPathBuf, ()> {
|
||||||
|
if url.scheme() == "file" {
|
||||||
|
Ok(SystemPathBuf::from_path_buf(url.to_file_path()?).map_err(|_| ())?)
|
||||||
|
} else {
|
||||||
|
Err(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub(crate) struct LSPSystem {
|
||||||
|
/// A read-only copy of the index where the server stores all the open documents and settings.
|
||||||
|
///
|
||||||
|
/// This will be [`None`] when a mutable reference is held to the index via [`index_mut`]
|
||||||
|
/// method to prevent the index from being accessed while it is being modified. It will be
|
||||||
|
/// restored when the mutable reference is dropped.
|
||||||
|
///
|
||||||
|
/// [`index_mut`]: crate::Session::index_mut
|
||||||
|
index: Option<Arc<Index>>,
|
||||||
|
|
||||||
|
/// A system implementation that uses the local file system.
|
||||||
|
os_system: OsSystem,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LSPSystem {
|
||||||
|
pub(crate) fn new(index: Arc<Index>) -> Self {
|
||||||
|
let cwd = std::env::current_dir().unwrap();
|
||||||
|
let os_system = OsSystem::new(SystemPathBuf::from_path_buf(cwd).unwrap());
|
||||||
|
|
||||||
|
Self {
|
||||||
|
index: Some(index),
|
||||||
|
os_system,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Takes the index out of the system.
|
||||||
|
pub(crate) fn take_index(&mut self) -> Option<Arc<Index>> {
|
||||||
|
self.index.take()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets the index for the system.
|
||||||
|
pub(crate) fn set_index(&mut self, index: Arc<Index>) {
|
||||||
|
self.index = Some(index);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a reference to the contained index.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// Panics if the index is `None`.
|
||||||
|
fn index(&self) -> &Index {
|
||||||
|
self.index.as_ref().unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_document_ref(&self, url: Url) -> Option<DocumentQuery> {
|
||||||
|
let index = self.index();
|
||||||
|
let key = index.key_from_url(url);
|
||||||
|
index.make_document_ref(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn system_path_to_document_ref(&self, path: &SystemPath) -> Result<Option<DocumentQuery>> {
|
||||||
|
let url = Url::from_file_path(path.as_std_path()).map_err(|()| {
|
||||||
|
std::io::Error::new(
|
||||||
|
std::io::ErrorKind::InvalidInput,
|
||||||
|
format!("Failed to convert system path to URL: {path:?}"),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
Ok(self.make_document_ref(url))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn system_virtual_path_to_document_ref(
|
||||||
|
&self,
|
||||||
|
path: &SystemVirtualPath,
|
||||||
|
) -> Result<Option<DocumentQuery>> {
|
||||||
|
let url = Url::parse(path.as_str()).map_err(|_| {
|
||||||
|
std::io::Error::new(
|
||||||
|
std::io::ErrorKind::InvalidInput,
|
||||||
|
format!("Failed to convert virtual path to URL: {path:?}"),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
Ok(self.make_document_ref(url))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl System for LSPSystem {
|
||||||
|
fn path_metadata(&self, path: &SystemPath) -> Result<Metadata> {
|
||||||
|
let document = self.system_path_to_document_ref(path)?;
|
||||||
|
|
||||||
|
if let Some(document) = document {
|
||||||
|
Ok(Metadata::new(
|
||||||
|
document_revision(&document),
|
||||||
|
None,
|
||||||
|
FileType::File,
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
self.os_system.path_metadata(path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf> {
|
||||||
|
self.os_system.canonicalize_path(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_to_string(&self, path: &SystemPath) -> Result<String> {
|
||||||
|
let document = self.system_path_to_document_ref(path)?;
|
||||||
|
|
||||||
|
match document {
|
||||||
|
Some(DocumentQuery::Text { document, .. }) => Ok(document.contents().to_string()),
|
||||||
|
_ => self.os_system.read_to_string(path),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_to_notebook(&self, path: &SystemPath) -> std::result::Result<Notebook, NotebookError> {
|
||||||
|
let document = self.system_path_to_document_ref(path)?;
|
||||||
|
|
||||||
|
match document {
|
||||||
|
Some(DocumentQuery::Text { document, .. }) => {
|
||||||
|
Notebook::from_source_code(document.contents())
|
||||||
|
}
|
||||||
|
Some(DocumentQuery::Notebook { notebook, .. }) => Ok(notebook.make_ruff_notebook()),
|
||||||
|
None => self.os_system.read_to_notebook(path),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn virtual_path_metadata(&self, path: &SystemVirtualPath) -> Result<Metadata> {
|
||||||
|
// Virtual paths only exists in the LSP system, so we don't need to check the OS system.
|
||||||
|
let document = self
|
||||||
|
.system_virtual_path_to_document_ref(path)?
|
||||||
|
.ok_or_else(|| virtual_path_not_found(path))?;
|
||||||
|
|
||||||
|
Ok(Metadata::new(
|
||||||
|
document_revision(&document),
|
||||||
|
None,
|
||||||
|
FileType::File,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_virtual_path_to_string(&self, path: &SystemVirtualPath) -> Result<String> {
|
||||||
|
let document = self
|
||||||
|
.system_virtual_path_to_document_ref(path)?
|
||||||
|
.ok_or_else(|| virtual_path_not_found(path))?;
|
||||||
|
|
||||||
|
if let DocumentQuery::Text { document, .. } = &document {
|
||||||
|
Ok(document.contents().to_string())
|
||||||
|
} else {
|
||||||
|
Err(not_a_text_document(path))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_virtual_path_to_notebook(
|
||||||
|
&self,
|
||||||
|
path: &SystemVirtualPath,
|
||||||
|
) -> std::result::Result<Notebook, NotebookError> {
|
||||||
|
let document = self
|
||||||
|
.system_virtual_path_to_document_ref(path)?
|
||||||
|
.ok_or_else(|| virtual_path_not_found(path))?;
|
||||||
|
|
||||||
|
match document {
|
||||||
|
DocumentQuery::Text { document, .. } => Notebook::from_source_code(document.contents()),
|
||||||
|
DocumentQuery::Notebook { notebook, .. } => Ok(notebook.make_ruff_notebook()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn current_directory(&self) -> &SystemPath {
|
||||||
|
self.os_system.current_directory()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_directory<'a>(
|
||||||
|
&'a self,
|
||||||
|
path: &SystemPath,
|
||||||
|
) -> Result<Box<dyn Iterator<Item = Result<DirectoryEntry>> + 'a>> {
|
||||||
|
self.os_system.read_directory(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder {
|
||||||
|
self.os_system.walk_directory(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn as_any(&self) -> &dyn Any {
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||||
|
self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Builds the `InvalidInput` I/O error returned when a document is expected to
/// be text but is not (e.g. a notebook).
fn not_a_text_document(path: impl Display) -> std::io::Error {
    let message = format!("Input is not a text document: {path}");
    std::io::Error::new(std::io::ErrorKind::InvalidInput, message)
}
|
||||||
|
|
||||||
|
/// Builds the `NotFound` I/O error returned when a virtual path has no
/// corresponding open document.
fn virtual_path_not_found(path: impl Display) -> std::io::Error {
    let message = format!("Virtual path does not exist: {path}");
    std::io::Error::new(std::io::ErrorKind::NotFound, message)
}
|
||||||
|
|
||||||
|
/// Helper function to get the [`FileRevision`] of the given document.
|
||||||
|
fn document_revision(document: &DocumentQuery) -> FileRevision {
|
||||||
|
// The file revision is just an opaque number which doesn't have any significant meaning other
|
||||||
|
// than that the file has changed if the revisions are different.
|
||||||
|
#[allow(clippy::cast_sign_loss)]
|
||||||
|
FileRevision::new(document.version() as u128)
|
||||||
|
}
|
221
crates/red_knot_server/src/trace.rs
Normal file
221
crates/red_knot_server/src/trace.rs
Normal file
|
@ -0,0 +1,221 @@
|
||||||
|
//! The tracing system for `ruff server`.
|
||||||
|
//!
|
||||||
|
//! Traces are controlled by the `logLevel` setting, along with the
|
||||||
|
//! trace level set through the LSP. On VS Code, the trace level can
|
||||||
|
//! also be set with `ruff.trace.server`. A trace level of `messages` or
|
||||||
|
//! `verbose` will enable tracing - otherwise, no traces will be shown.
|
||||||
|
//!
|
||||||
|
//! `logLevel` can be used to configure the level of tracing that is shown.
|
||||||
|
//! By default, `logLevel` is set to `"info"`.
|
||||||
|
//!
|
||||||
|
//! The server also supports the `RUFF_TRACE` environment variable, which will
|
||||||
|
//! override the trace value provided by the LSP client. Use this if there's no good way
|
||||||
|
//! to set the trace value through your editor's configuration.
|
||||||
|
//!
|
||||||
|
//! Tracing will write to `stderr` by default, which should appear in the logs for most LSP clients.
|
||||||
|
//! A `logFile` path can also be specified in the settings, and output will be directed there instead.
|
||||||
|
use core::str;
|
||||||
|
use lsp_server::{Message, Notification};
|
||||||
|
use lsp_types::{
|
||||||
|
notification::{LogTrace, Notification as _},
|
||||||
|
ClientInfo, TraceValue,
|
||||||
|
};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use std::{
|
||||||
|
io::{Error as IoError, ErrorKind, Write},
|
||||||
|
path::PathBuf,
|
||||||
|
str::FromStr,
|
||||||
|
sync::{Arc, Mutex, OnceLock},
|
||||||
|
};
|
||||||
|
use tracing::level_filters::LevelFilter;
|
||||||
|
use tracing_subscriber::{
|
||||||
|
fmt::{time::Uptime, writer::BoxMakeWriter, MakeWriter},
|
||||||
|
layer::SubscriberExt,
|
||||||
|
Layer,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::server::ClientSender;
|
||||||
|
|
||||||
|
const TRACE_ENV_KEY: &str = "RUFF_TRACE";
|
||||||
|
|
||||||
|
static LOGGING_SENDER: OnceLock<ClientSender> = OnceLock::new();
|
||||||
|
|
||||||
|
static TRACE_VALUE: Mutex<lsp_types::TraceValue> = Mutex::new(lsp_types::TraceValue::Off);
|
||||||
|
|
||||||
|
pub(crate) fn set_trace_value(trace_value: TraceValue) {
|
||||||
|
let mut global_trace_value = TRACE_VALUE
|
||||||
|
.lock()
|
||||||
|
.expect("trace value mutex should be available");
|
||||||
|
*global_trace_value = trace_value;
|
||||||
|
}
|
||||||
|
|
||||||
|
// A tracing writer that uses LSPs logTrace method.
|
||||||
|
struct TraceLogWriter;
|
||||||
|
|
||||||
|
impl Write for TraceLogWriter {
|
||||||
|
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
|
||||||
|
let message = str::from_utf8(buf).map_err(|e| IoError::new(ErrorKind::InvalidData, e))?;
|
||||||
|
LOGGING_SENDER
|
||||||
|
.get()
|
||||||
|
.expect("logging sender should be initialized at this point")
|
||||||
|
.send(Message::Notification(Notification {
|
||||||
|
method: LogTrace::METHOD.to_owned(),
|
||||||
|
params: serde_json::json!({
|
||||||
|
"message": message
|
||||||
|
}),
|
||||||
|
}))
|
||||||
|
.map_err(|e| IoError::new(ErrorKind::Other, e))?;
|
||||||
|
Ok(buf.len())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn flush(&mut self) -> std::io::Result<()> {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> MakeWriter<'a> for TraceLogWriter {
|
||||||
|
type Writer = Self;
|
||||||
|
|
||||||
|
fn make_writer(&'a self) -> Self::Writer {
|
||||||
|
Self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn init_tracing(
|
||||||
|
sender: ClientSender,
|
||||||
|
log_level: LogLevel,
|
||||||
|
log_file: Option<&std::path::Path>,
|
||||||
|
client: Option<&ClientInfo>,
|
||||||
|
) {
|
||||||
|
LOGGING_SENDER
|
||||||
|
.set(sender)
|
||||||
|
.expect("logging sender should only be initialized once");
|
||||||
|
|
||||||
|
let log_file = log_file
|
||||||
|
.map(|path| {
|
||||||
|
// this expands `logFile` so that tildes and environment variables
|
||||||
|
// are replaced with their values, if possible.
|
||||||
|
if let Some(expanded) = shellexpand::full(&path.to_string_lossy())
|
||||||
|
.ok()
|
||||||
|
.and_then(|path| PathBuf::from_str(&path).ok())
|
||||||
|
{
|
||||||
|
expanded
|
||||||
|
} else {
|
||||||
|
path.to_path_buf()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.and_then(|path| {
|
||||||
|
std::fs::OpenOptions::new()
|
||||||
|
.create(true)
|
||||||
|
.append(true)
|
||||||
|
.open(&path)
|
||||||
|
.map_err(|err| {
|
||||||
|
#[allow(clippy::print_stderr)]
|
||||||
|
{
|
||||||
|
eprintln!(
|
||||||
|
"Failed to open file at {} for logging: {err}",
|
||||||
|
path.display()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.ok()
|
||||||
|
});
|
||||||
|
|
||||||
|
let logger = match log_file {
|
||||||
|
Some(file) => BoxMakeWriter::new(Arc::new(file)),
|
||||||
|
None => {
|
||||||
|
if client.is_some_and(|client| {
|
||||||
|
client.name.starts_with("Zed") || client.name.starts_with("Visual Studio Code")
|
||||||
|
}) {
|
||||||
|
BoxMakeWriter::new(TraceLogWriter)
|
||||||
|
} else {
|
||||||
|
BoxMakeWriter::new(std::io::stderr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let subscriber = tracing_subscriber::Registry::default().with(
|
||||||
|
tracing_subscriber::fmt::layer()
|
||||||
|
.with_timer(Uptime::default())
|
||||||
|
.with_thread_names(true)
|
||||||
|
.with_ansi(false)
|
||||||
|
.with_writer(logger)
|
||||||
|
.with_filter(TraceLevelFilter)
|
||||||
|
.with_filter(LogLevelFilter { filter: log_level }),
|
||||||
|
);
|
||||||
|
|
||||||
|
tracing::subscriber::set_global_default(subscriber)
|
||||||
|
.expect("should be able to set global default subscriber");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
pub(crate) enum LogLevel {
|
||||||
|
#[default]
|
||||||
|
Error,
|
||||||
|
Warn,
|
||||||
|
Info,
|
||||||
|
Debug,
|
||||||
|
Trace,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LogLevel {
|
||||||
|
fn trace_level(self) -> tracing::Level {
|
||||||
|
match self {
|
||||||
|
Self::Error => tracing::Level::ERROR,
|
||||||
|
Self::Warn => tracing::Level::WARN,
|
||||||
|
Self::Info => tracing::Level::INFO,
|
||||||
|
Self::Debug => tracing::Level::DEBUG,
|
||||||
|
Self::Trace => tracing::Level::TRACE,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Filters out traces which have a log level lower than the `logLevel` set by the client.
|
||||||
|
struct LogLevelFilter {
|
||||||
|
filter: LogLevel,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Filters out traces if the trace value set by the client is `off`.
|
||||||
|
struct TraceLevelFilter;
|
||||||
|
|
||||||
|
impl<S> tracing_subscriber::layer::Filter<S> for LogLevelFilter {
|
||||||
|
fn enabled(
|
||||||
|
&self,
|
||||||
|
meta: &tracing::Metadata<'_>,
|
||||||
|
_: &tracing_subscriber::layer::Context<'_, S>,
|
||||||
|
) -> bool {
|
||||||
|
let filter = if meta.target().starts_with("ruff") {
|
||||||
|
self.filter.trace_level()
|
||||||
|
} else {
|
||||||
|
tracing::Level::INFO
|
||||||
|
};
|
||||||
|
|
||||||
|
meta.level() <= &filter
|
||||||
|
}
|
||||||
|
|
||||||
|
fn max_level_hint(&self) -> Option<tracing::level_filters::LevelFilter> {
|
||||||
|
Some(LevelFilter::from_level(self.filter.trace_level()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> tracing_subscriber::layer::Filter<S> for TraceLevelFilter {
|
||||||
|
fn enabled(
|
||||||
|
&self,
|
||||||
|
_: &tracing::Metadata<'_>,
|
||||||
|
_: &tracing_subscriber::layer::Context<'_, S>,
|
||||||
|
) -> bool {
|
||||||
|
trace_value() != lsp_types::TraceValue::Off
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn trace_value() -> lsp_types::TraceValue {
|
||||||
|
std::env::var(TRACE_ENV_KEY)
|
||||||
|
.ok()
|
||||||
|
.and_then(|trace| serde_json::from_value(serde_json::Value::String(trace)).ok())
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
*TRACE_VALUE
|
||||||
|
.lock()
|
||||||
|
.expect("trace value mutex should be available")
|
||||||
|
})
|
||||||
|
}
|
|
@ -273,6 +273,10 @@ impl System for WasmSystem {
|
||||||
fn as_any(&self) -> &dyn Any {
|
fn as_any(&self) -> &dyn Any {
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
|
||||||
|
self
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn not_found() -> std::io::Error {
|
fn not_found() -> std::io::Error {
|
||||||
|
|
|
@ -1,5 +1,7 @@
|
||||||
use std::panic::{AssertUnwindSafe, RefUnwindSafe};
|
use std::panic::{AssertUnwindSafe, RefUnwindSafe};
|
||||||
|
|
||||||
|
use salsa::Cancelled;
|
||||||
|
|
||||||
use red_knot_module_resolver::{vendored_typeshed_stubs, Db as ResolverDb};
|
use red_knot_module_resolver::{vendored_typeshed_stubs, Db as ResolverDb};
|
||||||
use red_knot_python_semantic::Db as SemanticDb;
|
use red_knot_python_semantic::Db as SemanticDb;
|
||||||
use ruff_db::files::{File, Files};
|
use ruff_db::files::{File, Files};
|
||||||
|
@ -7,7 +9,6 @@ use ruff_db::program::{Program, ProgramSettings};
|
||||||
use ruff_db::system::System;
|
use ruff_db::system::System;
|
||||||
use ruff_db::vendored::VendoredFileSystem;
|
use ruff_db::vendored::VendoredFileSystem;
|
||||||
use ruff_db::{Db as SourceDb, Upcast};
|
use ruff_db::{Db as SourceDb, Upcast};
|
||||||
use salsa::Cancelled;
|
|
||||||
|
|
||||||
use crate::lint::Diagnostics;
|
use crate::lint::Diagnostics;
|
||||||
use crate::workspace::{check_file, Workspace, WorkspaceMetadata};
|
use crate::workspace::{check_file, Workspace, WorkspaceMetadata};
|
||||||
|
@ -132,6 +133,10 @@ impl SourceDb for RootDatabase {
|
||||||
&*self.system
|
&*self.system
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn system_mut(&mut self) -> &mut dyn System {
|
||||||
|
&mut *self.system
|
||||||
|
}
|
||||||
|
|
||||||
fn files(&self) -> &Files {
|
fn files(&self) -> &Files {
|
||||||
&self.files
|
&self.files
|
||||||
}
|
}
|
||||||
|
@ -192,6 +197,10 @@ pub(crate) mod tests {
|
||||||
&self.system
|
&self.system
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn system_mut(&mut self) -> &mut dyn System {
|
||||||
|
&mut self.system
|
||||||
|
}
|
||||||
|
|
||||||
fn files(&self) -> &Files {
|
fn files(&self) -> &Files {
|
||||||
&self.files
|
&self.files
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,6 +23,7 @@ pub type FxDashSet<K> = dashmap::DashSet<K, BuildHasherDefault<FxHasher>>;
|
||||||
pub trait Db: salsa::Database {
|
pub trait Db: salsa::Database {
|
||||||
fn vendored(&self) -> &VendoredFileSystem;
|
fn vendored(&self) -> &VendoredFileSystem;
|
||||||
fn system(&self) -> &dyn System;
|
fn system(&self) -> &dyn System;
|
||||||
|
fn system_mut(&mut self) -> &mut dyn System;
|
||||||
fn files(&self) -> &Files;
|
fn files(&self) -> &Files;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -103,6 +104,10 @@ mod tests {
|
||||||
&self.system
|
&self.system
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn system_mut(&mut self) -> &mut dyn System {
|
||||||
|
&mut self.system
|
||||||
|
}
|
||||||
|
|
||||||
fn files(&self) -> &Files {
|
fn files(&self) -> &Files {
|
||||||
&self.files
|
&self.files
|
||||||
}
|
}
|
||||||
|
|
|
@ -130,6 +130,8 @@ pub trait System: Debug {
|
||||||
fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder;
|
fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder;
|
||||||
|
|
||||||
fn as_any(&self) -> &dyn std::any::Any;
|
fn as_any(&self) -> &dyn std::any::Any;
|
||||||
|
|
||||||
|
fn as_any_mut(&mut self) -> &mut dyn std::any::Any;
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||||
|
@ -140,6 +142,14 @@ pub struct Metadata {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Metadata {
|
impl Metadata {
|
||||||
|
pub fn new(revision: FileRevision, permissions: Option<u32>, file_type: FileType) -> Self {
|
||||||
|
Self {
|
||||||
|
revision,
|
||||||
|
permissions,
|
||||||
|
file_type,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn revision(&self) -> FileRevision {
|
pub fn revision(&self) -> FileRevision {
|
||||||
self.revision
|
self.revision
|
||||||
}
|
}
|
||||||
|
|
|
@ -112,6 +112,10 @@ impl System for OsSystem {
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
fn read_directory(
|
fn read_directory(
|
||||||
&self,
|
&self,
|
||||||
path: &SystemPath,
|
path: &SystemPath,
|
||||||
|
|
|
@ -132,6 +132,10 @@ impl System for TestSystem {
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
fn read_directory<'a>(
|
fn read_directory<'a>(
|
||||||
&'a self,
|
&'a self,
|
||||||
path: &SystemPath,
|
path: &SystemPath,
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue