mirror of
https://github.com/astral-sh/ruff.git
synced 2025-08-30 15:17:59 +00:00
Rename Red Knot (#17820)
This commit is contained in:
parent
e6a798b962
commit
b51c4f82ea
1564 changed files with 1598 additions and 1578 deletions
92
crates/ty_server/src/document.rs
Normal file
92
crates/ty_server/src/document.rs
Normal file
|
@ -0,0 +1,92 @@
|
|||
//! Types and utilities for working with text, modifying source files, and `Ruff <-> LSP` type conversion.
|
||||
|
||||
mod location;
|
||||
mod notebook;
|
||||
mod range;
|
||||
mod text_document;
|
||||
|
||||
pub(crate) use location::ToLink;
|
||||
use lsp_types::{PositionEncodingKind, Url};
|
||||
pub use notebook::NotebookDocument;
|
||||
pub(crate) use range::{FileRangeExt, PositionExt, RangeExt, TextSizeExt, ToRangeExt};
|
||||
pub(crate) use text_document::DocumentVersion;
|
||||
pub use text_document::TextDocument;
|
||||
|
||||
/// A convenient enumeration for supported text encodings. Can be converted to [`lsp_types::PositionEncodingKind`].
|
||||
// Please maintain the order from least to greatest priority for the derived `Ord` impl.
|
||||
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum PositionEncoding {
|
||||
/// UTF 16 is the encoding supported by all LSP clients.
|
||||
#[default]
|
||||
UTF16,
|
||||
|
||||
/// Second choice because UTF32 uses a fixed 4 byte encoding for each character (makes conversion relatively easy)
|
||||
UTF32,
|
||||
|
||||
/// Ruff's preferred encoding
|
||||
UTF8,
|
||||
}
|
||||
|
||||
impl From<PositionEncoding> for ruff_source_file::PositionEncoding {
|
||||
fn from(value: PositionEncoding) -> Self {
|
||||
match value {
|
||||
PositionEncoding::UTF8 => Self::Utf8,
|
||||
PositionEncoding::UTF16 => Self::Utf16,
|
||||
PositionEncoding::UTF32 => Self::Utf32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A unique document ID, derived from a URL passed as part of an LSP request.
|
||||
/// This document ID can point to either be a standalone Python file, a full notebook, or a cell within a notebook.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum DocumentKey {
|
||||
Notebook(Url),
|
||||
NotebookCell(Url),
|
||||
Text(Url),
|
||||
}
|
||||
|
||||
impl DocumentKey {
|
||||
/// Returns the URL associated with the key.
|
||||
pub(crate) fn url(&self) -> &Url {
|
||||
match self {
|
||||
DocumentKey::NotebookCell(url)
|
||||
| DocumentKey::Notebook(url)
|
||||
| DocumentKey::Text(url) => url,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for DocumentKey {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::NotebookCell(url) | Self::Notebook(url) | Self::Text(url) => url.fmt(f),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PositionEncoding> for PositionEncodingKind {
|
||||
fn from(value: PositionEncoding) -> Self {
|
||||
match value {
|
||||
PositionEncoding::UTF8 => PositionEncodingKind::UTF8,
|
||||
PositionEncoding::UTF16 => PositionEncodingKind::UTF16,
|
||||
PositionEncoding::UTF32 => PositionEncodingKind::UTF32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&PositionEncodingKind> for PositionEncoding {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: &PositionEncodingKind) -> Result<Self, Self::Error> {
|
||||
Ok(if value == &PositionEncodingKind::UTF8 {
|
||||
PositionEncoding::UTF8
|
||||
} else if value == &PositionEncodingKind::UTF16 {
|
||||
PositionEncoding::UTF16
|
||||
} else if value == &PositionEncodingKind::UTF32 {
|
||||
PositionEncoding::UTF32
|
||||
} else {
|
||||
return Err(());
|
||||
})
|
||||
}
|
||||
}
|
54
crates/ty_server/src/document/location.rs
Normal file
54
crates/ty_server/src/document/location.rs
Normal file
|
@ -0,0 +1,54 @@
|
|||
use crate::document::{FileRangeExt, ToRangeExt};
|
||||
use crate::system::file_to_url;
|
||||
use crate::PositionEncoding;
|
||||
use lsp_types::Location;
|
||||
use ruff_db::files::FileRange;
|
||||
use ruff_db::source::{line_index, source_text};
|
||||
use ruff_text_size::Ranged;
|
||||
use ty_ide::{Db, NavigationTarget};
|
||||
|
||||
pub(crate) trait ToLink {
|
||||
fn to_location(&self, db: &dyn ty_ide::Db, encoding: PositionEncoding) -> Option<Location>;
|
||||
|
||||
fn to_link(
|
||||
&self,
|
||||
db: &dyn ty_ide::Db,
|
||||
src: Option<FileRange>,
|
||||
encoding: PositionEncoding,
|
||||
) -> Option<lsp_types::LocationLink>;
|
||||
}
|
||||
|
||||
impl ToLink for NavigationTarget {
|
||||
fn to_location(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<Location> {
|
||||
FileRange::new(self.file(), self.focus_range()).to_location(db.upcast(), encoding)
|
||||
}
|
||||
|
||||
fn to_link(
|
||||
&self,
|
||||
db: &dyn Db,
|
||||
src: Option<FileRange>,
|
||||
encoding: PositionEncoding,
|
||||
) -> Option<lsp_types::LocationLink> {
|
||||
let file = self.file();
|
||||
let uri = file_to_url(db.upcast(), file)?;
|
||||
let source = source_text(db.upcast(), file);
|
||||
let index = line_index(db.upcast(), file);
|
||||
|
||||
let target_range = self.full_range().to_lsp_range(&source, &index, encoding);
|
||||
let selection_range = self.focus_range().to_lsp_range(&source, &index, encoding);
|
||||
|
||||
let src = src.map(|src| {
|
||||
let source = source_text(db.upcast(), src.file());
|
||||
let index = line_index(db.upcast(), src.file());
|
||||
|
||||
src.range().to_lsp_range(&source, &index, encoding)
|
||||
});
|
||||
|
||||
Some(lsp_types::LocationLink {
|
||||
target_uri: uri,
|
||||
target_range,
|
||||
target_selection_range: selection_range,
|
||||
origin_selection_range: src,
|
||||
})
|
||||
}
|
||||
}
|
356
crates/ty_server/src/document/notebook.rs
Normal file
356
crates/ty_server/src/document/notebook.rs
Normal file
|
@ -0,0 +1,356 @@
|
|||
use anyhow::Ok;
|
||||
use lsp_types::NotebookCellKind;
|
||||
use ruff_notebook::CellMetadata;
|
||||
use rustc_hash::{FxBuildHasher, FxHashMap};
|
||||
|
||||
use crate::{PositionEncoding, TextDocument};
|
||||
|
||||
use super::DocumentVersion;
|
||||
|
||||
pub(super) type CellId = usize;
|
||||
|
||||
/// The state of a notebook document in the server. Contains an array of cells whose
|
||||
/// contents are internally represented by [`TextDocument`]s.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct NotebookDocument {
|
||||
cells: Vec<NotebookCell>,
|
||||
metadata: ruff_notebook::RawNotebookMetadata,
|
||||
version: DocumentVersion,
|
||||
// Used to quickly find the index of a cell for a given URL.
|
||||
cell_index: FxHashMap<lsp_types::Url, CellId>,
|
||||
}
|
||||
|
||||
/// A single cell within a notebook, which has text contents represented as a `TextDocument`.
|
||||
#[derive(Clone, Debug)]
|
||||
struct NotebookCell {
|
||||
url: lsp_types::Url,
|
||||
kind: NotebookCellKind,
|
||||
document: TextDocument,
|
||||
}
|
||||
|
||||
impl NotebookDocument {
|
||||
pub fn new(
|
||||
version: DocumentVersion,
|
||||
cells: Vec<lsp_types::NotebookCell>,
|
||||
metadata: serde_json::Map<String, serde_json::Value>,
|
||||
cell_documents: Vec<lsp_types::TextDocumentItem>,
|
||||
) -> crate::Result<Self> {
|
||||
let mut cell_contents: FxHashMap<_, _> = cell_documents
|
||||
.into_iter()
|
||||
.map(|document| (document.uri, document.text))
|
||||
.collect();
|
||||
|
||||
let cells: Vec<_> = cells
|
||||
.into_iter()
|
||||
.map(|cell| {
|
||||
let contents = cell_contents.remove(&cell.document).unwrap_or_default();
|
||||
NotebookCell::new(cell, contents, version)
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(Self {
|
||||
version,
|
||||
cell_index: Self::make_cell_index(cells.as_slice()),
|
||||
metadata: serde_json::from_value(serde_json::Value::Object(metadata))?,
|
||||
cells,
|
||||
})
|
||||
}
|
||||
|
||||
/// Generates a pseudo-representation of a notebook that lacks per-cell metadata and contextual information
|
||||
/// but should still work with Ruff's linter.
|
||||
pub fn make_ruff_notebook(&self) -> ruff_notebook::Notebook {
|
||||
let cells = self
|
||||
.cells
|
||||
.iter()
|
||||
.map(|cell| match cell.kind {
|
||||
NotebookCellKind::Code => ruff_notebook::Cell::Code(ruff_notebook::CodeCell {
|
||||
execution_count: None,
|
||||
id: None,
|
||||
metadata: CellMetadata::default(),
|
||||
outputs: vec![],
|
||||
source: ruff_notebook::SourceValue::String(
|
||||
cell.document.contents().to_string(),
|
||||
),
|
||||
}),
|
||||
NotebookCellKind::Markup => {
|
||||
ruff_notebook::Cell::Markdown(ruff_notebook::MarkdownCell {
|
||||
attachments: None,
|
||||
id: None,
|
||||
metadata: CellMetadata::default(),
|
||||
source: ruff_notebook::SourceValue::String(
|
||||
cell.document.contents().to_string(),
|
||||
),
|
||||
})
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let raw_notebook = ruff_notebook::RawNotebook {
|
||||
cells,
|
||||
metadata: self.metadata.clone(),
|
||||
nbformat: 4,
|
||||
nbformat_minor: 5,
|
||||
};
|
||||
|
||||
ruff_notebook::Notebook::from_raw_notebook(raw_notebook, false)
|
||||
.unwrap_or_else(|err| panic!("Server notebook document could not be converted to Ruff's notebook document format: {err}"))
|
||||
}
|
||||
|
||||
pub(crate) fn update(
|
||||
&mut self,
|
||||
cells: Option<lsp_types::NotebookDocumentCellChange>,
|
||||
metadata_change: Option<serde_json::Map<String, serde_json::Value>>,
|
||||
version: DocumentVersion,
|
||||
encoding: PositionEncoding,
|
||||
) -> crate::Result<()> {
|
||||
self.version = version;
|
||||
|
||||
if let Some(lsp_types::NotebookDocumentCellChange {
|
||||
structure,
|
||||
data,
|
||||
text_content,
|
||||
}) = cells
|
||||
{
|
||||
// The structural changes should be done first, as they may affect the cell index.
|
||||
if let Some(structure) = structure {
|
||||
let start = structure.array.start as usize;
|
||||
let delete = structure.array.delete_count as usize;
|
||||
|
||||
// This is required because of the way the `NotebookCell` is modelled. We include
|
||||
// the `TextDocument` within the `NotebookCell` so when it's deleted, the
|
||||
// corresponding `TextDocument` is removed as well. But, when cells are
|
||||
// re-ordered, the change request doesn't provide the actual contents of the cell.
|
||||
// Instead, it only provides that (a) these cell URIs were removed, and (b) these
|
||||
// cell URIs were added.
|
||||
// https://github.com/astral-sh/ruff/issues/12573
|
||||
let mut deleted_cells = FxHashMap::default();
|
||||
|
||||
// First, delete the cells and remove them from the index.
|
||||
if delete > 0 {
|
||||
for cell in self.cells.drain(start..start + delete) {
|
||||
self.cell_index.remove(&cell.url);
|
||||
deleted_cells.insert(cell.url, cell.document);
|
||||
}
|
||||
}
|
||||
|
||||
// Second, insert the new cells with the available information. This array does not
|
||||
// provide the actual contents of the cells, so we'll initialize them with empty
|
||||
// contents.
|
||||
for cell in structure.array.cells.into_iter().flatten().rev() {
|
||||
let (content, version) =
|
||||
if let Some(text_document) = deleted_cells.remove(&cell.document) {
|
||||
let version = text_document.version();
|
||||
(text_document.into_contents(), version)
|
||||
} else {
|
||||
(String::new(), 0)
|
||||
};
|
||||
self.cells
|
||||
.insert(start, NotebookCell::new(cell, content, version));
|
||||
}
|
||||
|
||||
// Third, register the new cells in the index and update existing ones that came
|
||||
// after the insertion.
|
||||
for (index, cell) in self.cells.iter().enumerate().skip(start) {
|
||||
self.cell_index.insert(cell.url.clone(), index);
|
||||
}
|
||||
|
||||
// Finally, update the text document that represents the cell with the actual
|
||||
// contents. This should be done at the end so that both the `cells` and
|
||||
// `cell_index` are updated before we start applying the changes to the cells.
|
||||
if let Some(did_open) = structure.did_open {
|
||||
for cell_text_document in did_open {
|
||||
if let Some(cell) = self.cell_by_uri_mut(&cell_text_document.uri) {
|
||||
cell.document = TextDocument::new(
|
||||
cell_text_document.text,
|
||||
cell_text_document.version,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(cell_data) = data {
|
||||
for cell in cell_data {
|
||||
if let Some(existing_cell) = self.cell_by_uri_mut(&cell.document) {
|
||||
existing_cell.kind = cell.kind;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(content_changes) = text_content {
|
||||
for content_change in content_changes {
|
||||
if let Some(cell) = self.cell_by_uri_mut(&content_change.document.uri) {
|
||||
cell.document
|
||||
.apply_changes(content_change.changes, version, encoding);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(metadata_change) = metadata_change {
|
||||
self.metadata = serde_json::from_value(serde_json::Value::Object(metadata_change))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the current version of the notebook document.
|
||||
pub(crate) fn version(&self) -> DocumentVersion {
|
||||
self.version
|
||||
}
|
||||
|
||||
/// Get the URI for a cell by its index within the cell array.
|
||||
#[expect(dead_code)]
|
||||
pub(crate) fn cell_uri_by_index(&self, index: CellId) -> Option<&lsp_types::Url> {
|
||||
self.cells.get(index).map(|cell| &cell.url)
|
||||
}
|
||||
|
||||
/// Get the text document representing the contents of a cell by the cell URI.
|
||||
pub(crate) fn cell_document_by_uri(&self, uri: &lsp_types::Url) -> Option<&TextDocument> {
|
||||
self.cells
|
||||
.get(*self.cell_index.get(uri)?)
|
||||
.map(|cell| &cell.document)
|
||||
}
|
||||
|
||||
/// Returns a list of cell URIs in the order they appear in the array.
|
||||
pub(crate) fn urls(&self) -> impl Iterator<Item = &lsp_types::Url> {
|
||||
self.cells.iter().map(|cell| &cell.url)
|
||||
}
|
||||
|
||||
fn cell_by_uri_mut(&mut self, uri: &lsp_types::Url) -> Option<&mut NotebookCell> {
|
||||
self.cells.get_mut(*self.cell_index.get(uri)?)
|
||||
}
|
||||
|
||||
fn make_cell_index(cells: &[NotebookCell]) -> FxHashMap<lsp_types::Url, CellId> {
|
||||
let mut index = FxHashMap::with_capacity_and_hasher(cells.len(), FxBuildHasher);
|
||||
for (i, cell) in cells.iter().enumerate() {
|
||||
index.insert(cell.url.clone(), i);
|
||||
}
|
||||
index
|
||||
}
|
||||
}
|
||||
|
||||
impl NotebookCell {
|
||||
pub(crate) fn new(
|
||||
cell: lsp_types::NotebookCell,
|
||||
contents: String,
|
||||
version: DocumentVersion,
|
||||
) -> Self {
|
||||
Self {
|
||||
url: cell.document,
|
||||
kind: cell.kind,
|
||||
document: TextDocument::new(contents, version),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::NotebookDocument;
|
||||
|
||||
enum TestCellContent {
|
||||
#[allow(dead_code)]
|
||||
Markup(String),
|
||||
Code(String),
|
||||
}
|
||||
|
||||
fn create_test_url(index: usize) -> lsp_types::Url {
|
||||
lsp_types::Url::parse(&format!("cell:/test.ipynb#{index}")).unwrap()
|
||||
}
|
||||
|
||||
fn create_test_notebook(test_cells: Vec<TestCellContent>) -> NotebookDocument {
|
||||
let mut cells = Vec::with_capacity(test_cells.len());
|
||||
let mut cell_documents = Vec::with_capacity(test_cells.len());
|
||||
|
||||
for (index, test_cell) in test_cells.into_iter().enumerate() {
|
||||
let url = create_test_url(index);
|
||||
match test_cell {
|
||||
TestCellContent::Markup(content) => {
|
||||
cells.push(lsp_types::NotebookCell {
|
||||
kind: lsp_types::NotebookCellKind::Markup,
|
||||
document: url.clone(),
|
||||
metadata: None,
|
||||
execution_summary: None,
|
||||
});
|
||||
cell_documents.push(lsp_types::TextDocumentItem {
|
||||
uri: url,
|
||||
language_id: "markdown".to_owned(),
|
||||
version: 0,
|
||||
text: content,
|
||||
});
|
||||
}
|
||||
TestCellContent::Code(content) => {
|
||||
cells.push(lsp_types::NotebookCell {
|
||||
kind: lsp_types::NotebookCellKind::Code,
|
||||
document: url.clone(),
|
||||
metadata: None,
|
||||
execution_summary: None,
|
||||
});
|
||||
cell_documents.push(lsp_types::TextDocumentItem {
|
||||
uri: url,
|
||||
language_id: "python".to_owned(),
|
||||
version: 0,
|
||||
text: content,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NotebookDocument::new(0, cells, serde_json::Map::default(), cell_documents).unwrap()
|
||||
}
|
||||
|
||||
/// This test case checks that for a notebook with three code cells, when the client sends a
|
||||
/// change request to swap the first two cells, the notebook document is updated correctly.
|
||||
///
|
||||
/// The swap operation as a change request is represented as deleting the first two cells and
|
||||
/// adding them back in the reverse order.
|
||||
#[test]
|
||||
fn swap_cells() {
|
||||
let mut notebook = create_test_notebook(vec![
|
||||
TestCellContent::Code("cell = 0".to_owned()),
|
||||
TestCellContent::Code("cell = 1".to_owned()),
|
||||
TestCellContent::Code("cell = 2".to_owned()),
|
||||
]);
|
||||
|
||||
notebook
|
||||
.update(
|
||||
Some(lsp_types::NotebookDocumentCellChange {
|
||||
structure: Some(lsp_types::NotebookDocumentCellChangeStructure {
|
||||
array: lsp_types::NotebookCellArrayChange {
|
||||
start: 0,
|
||||
delete_count: 2,
|
||||
cells: Some(vec![
|
||||
lsp_types::NotebookCell {
|
||||
kind: lsp_types::NotebookCellKind::Code,
|
||||
document: create_test_url(1),
|
||||
metadata: None,
|
||||
execution_summary: None,
|
||||
},
|
||||
lsp_types::NotebookCell {
|
||||
kind: lsp_types::NotebookCellKind::Code,
|
||||
document: create_test_url(0),
|
||||
metadata: None,
|
||||
execution_summary: None,
|
||||
},
|
||||
]),
|
||||
},
|
||||
did_open: None,
|
||||
did_close: None,
|
||||
}),
|
||||
data: None,
|
||||
text_content: None,
|
||||
}),
|
||||
None,
|
||||
1,
|
||||
crate::PositionEncoding::default(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
notebook.make_ruff_notebook().source_code(),
|
||||
"cell = 1
|
||||
cell = 0
|
||||
cell = 2
|
||||
"
|
||||
);
|
||||
}
|
||||
}
|
172
crates/ty_server/src/document/range.rs
Normal file
172
crates/ty_server/src/document/range.rs
Normal file
|
@ -0,0 +1,172 @@
|
|||
use super::notebook;
|
||||
use super::PositionEncoding;
|
||||
use crate::system::file_to_url;
|
||||
|
||||
use lsp_types as types;
|
||||
use lsp_types::Location;
|
||||
|
||||
use ruff_db::files::FileRange;
|
||||
use ruff_db::source::{line_index, source_text};
|
||||
use ruff_notebook::NotebookIndex;
|
||||
use ruff_source_file::LineIndex;
|
||||
use ruff_source_file::{OneIndexed, SourceLocation};
|
||||
use ruff_text_size::{Ranged, TextRange, TextSize};
|
||||
use ty_python_semantic::Db;
|
||||
|
||||
#[expect(dead_code)]
|
||||
pub(crate) struct NotebookRange {
|
||||
pub(crate) cell: notebook::CellId,
|
||||
pub(crate) range: types::Range,
|
||||
}
|
||||
|
||||
pub(crate) trait RangeExt {
|
||||
fn to_text_range(&self, text: &str, index: &LineIndex, encoding: PositionEncoding)
|
||||
-> TextRange;
|
||||
}
|
||||
|
||||
pub(crate) trait PositionExt {
|
||||
fn to_text_size(&self, text: &str, index: &LineIndex, encoding: PositionEncoding) -> TextSize;
|
||||
}
|
||||
|
||||
pub(crate) trait TextSizeExt {
|
||||
fn to_position(
|
||||
self,
|
||||
text: &str,
|
||||
index: &LineIndex,
|
||||
encoding: PositionEncoding,
|
||||
) -> types::Position
|
||||
where
|
||||
Self: Sized;
|
||||
}
|
||||
|
||||
impl TextSizeExt for TextSize {
|
||||
fn to_position(
|
||||
self,
|
||||
text: &str,
|
||||
index: &LineIndex,
|
||||
encoding: PositionEncoding,
|
||||
) -> types::Position {
|
||||
let source_location = index.source_location(self, text, encoding.into());
|
||||
source_location_to_position(&source_location)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait ToRangeExt {
|
||||
fn to_lsp_range(
|
||||
&self,
|
||||
text: &str,
|
||||
index: &LineIndex,
|
||||
encoding: PositionEncoding,
|
||||
) -> types::Range;
|
||||
|
||||
#[expect(dead_code)]
|
||||
fn to_notebook_range(
|
||||
&self,
|
||||
text: &str,
|
||||
source_index: &LineIndex,
|
||||
notebook_index: &NotebookIndex,
|
||||
encoding: PositionEncoding,
|
||||
) -> NotebookRange;
|
||||
}
|
||||
|
||||
fn u32_index_to_usize(index: u32) -> usize {
|
||||
usize::try_from(index).expect("u32 fits in usize")
|
||||
}
|
||||
|
||||
impl PositionExt for lsp_types::Position {
|
||||
fn to_text_size(&self, text: &str, index: &LineIndex, encoding: PositionEncoding) -> TextSize {
|
||||
index.offset(
|
||||
SourceLocation {
|
||||
line: OneIndexed::from_zero_indexed(u32_index_to_usize(self.line)),
|
||||
character_offset: OneIndexed::from_zero_indexed(u32_index_to_usize(self.character)),
|
||||
},
|
||||
text,
|
||||
encoding.into(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl RangeExt for lsp_types::Range {
|
||||
fn to_text_range(
|
||||
&self,
|
||||
text: &str,
|
||||
index: &LineIndex,
|
||||
encoding: PositionEncoding,
|
||||
) -> TextRange {
|
||||
TextRange::new(
|
||||
self.start.to_text_size(text, index, encoding),
|
||||
self.end.to_text_size(text, index, encoding),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl ToRangeExt for TextRange {
|
||||
fn to_lsp_range(
|
||||
&self,
|
||||
text: &str,
|
||||
index: &LineIndex,
|
||||
encoding: PositionEncoding,
|
||||
) -> types::Range {
|
||||
types::Range {
|
||||
start: self.start().to_position(text, index, encoding),
|
||||
end: self.end().to_position(text, index, encoding),
|
||||
}
|
||||
}
|
||||
|
||||
fn to_notebook_range(
|
||||
&self,
|
||||
text: &str,
|
||||
source_index: &LineIndex,
|
||||
notebook_index: &NotebookIndex,
|
||||
encoding: PositionEncoding,
|
||||
) -> NotebookRange {
|
||||
let start = source_index.source_location(self.start(), text, encoding.into());
|
||||
let mut end = source_index.source_location(self.end(), text, encoding.into());
|
||||
let starting_cell = notebook_index.cell(start.line);
|
||||
|
||||
// weird edge case here - if the end of the range is where the newline after the cell got added (making it 'out of bounds')
|
||||
// we need to move it one character back (which should place it at the end of the last line).
|
||||
// we test this by checking if the ending offset is in a different (or nonexistent) cell compared to the cell of the starting offset.
|
||||
if notebook_index.cell(end.line) != starting_cell {
|
||||
end.line = end.line.saturating_sub(1);
|
||||
let offset = self.end().checked_sub(1.into()).unwrap_or_default();
|
||||
end.character_offset = source_index
|
||||
.source_location(offset, text, encoding.into())
|
||||
.character_offset;
|
||||
}
|
||||
|
||||
let start = source_location_to_position(¬ebook_index.translate_source_location(&start));
|
||||
let end = source_location_to_position(¬ebook_index.translate_source_location(&end));
|
||||
|
||||
NotebookRange {
|
||||
cell: starting_cell
|
||||
.map(OneIndexed::to_zero_indexed)
|
||||
.unwrap_or_default(),
|
||||
range: types::Range { start, end },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn source_location_to_position(location: &SourceLocation) -> types::Position {
|
||||
types::Position {
|
||||
line: u32::try_from(location.line.to_zero_indexed()).expect("line usize fits in u32"),
|
||||
character: u32::try_from(location.character_offset.to_zero_indexed())
|
||||
.expect("character usize fits in u32"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait FileRangeExt {
|
||||
fn to_location(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<Location>;
|
||||
}
|
||||
|
||||
impl FileRangeExt for FileRange {
|
||||
fn to_location(&self, db: &dyn Db, encoding: PositionEncoding) -> Option<Location> {
|
||||
let file = self.file();
|
||||
let uri = file_to_url(db, file)?;
|
||||
let source = source_text(db.upcast(), file);
|
||||
let line_index = line_index(db.upcast(), file);
|
||||
|
||||
let range = self.range().to_lsp_range(&source, &line_index, encoding);
|
||||
Some(Location { uri, range })
|
||||
}
|
||||
}
|
222
crates/ty_server/src/document/text_document.rs
Normal file
222
crates/ty_server/src/document/text_document.rs
Normal file
|
@ -0,0 +1,222 @@
|
|||
use lsp_types::TextDocumentContentChangeEvent;
|
||||
use ruff_source_file::LineIndex;
|
||||
|
||||
use crate::PositionEncoding;
|
||||
|
||||
use super::RangeExt;
|
||||
|
||||
pub(crate) type DocumentVersion = i32;
|
||||
|
||||
/// The state of an individual document in the server. Stays up-to-date
|
||||
/// with changes made by the user, including unsaved changes.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TextDocument {
|
||||
/// The string contents of the document.
|
||||
contents: String,
|
||||
/// A computed line index for the document. This should always reflect
|
||||
/// the current version of `contents`. Using a function like [`Self::modify`]
|
||||
/// will re-calculate the line index automatically when the `contents` value is updated.
|
||||
index: LineIndex,
|
||||
/// The latest version of the document, set by the LSP client. The server will panic in
|
||||
/// debug mode if we attempt to update the document with an 'older' version.
|
||||
version: DocumentVersion,
|
||||
/// The language ID of the document as provided by the client.
|
||||
language_id: Option<LanguageId>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum LanguageId {
|
||||
Python,
|
||||
Other,
|
||||
}
|
||||
|
||||
impl From<&str> for LanguageId {
|
||||
fn from(language_id: &str) -> Self {
|
||||
match language_id {
|
||||
"python" => Self::Python,
|
||||
_ => Self::Other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TextDocument {
|
||||
pub fn new(contents: String, version: DocumentVersion) -> Self {
|
||||
let index = LineIndex::from_source_text(&contents);
|
||||
Self {
|
||||
contents,
|
||||
index,
|
||||
version,
|
||||
language_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_language_id(mut self, language_id: &str) -> Self {
|
||||
self.language_id = Some(LanguageId::from(language_id));
|
||||
self
|
||||
}
|
||||
|
||||
pub fn into_contents(self) -> String {
|
||||
self.contents
|
||||
}
|
||||
|
||||
pub fn contents(&self) -> &str {
|
||||
&self.contents
|
||||
}
|
||||
|
||||
pub fn index(&self) -> &LineIndex {
|
||||
&self.index
|
||||
}
|
||||
|
||||
pub fn version(&self) -> DocumentVersion {
|
||||
self.version
|
||||
}
|
||||
|
||||
pub fn language_id(&self) -> Option<LanguageId> {
|
||||
self.language_id
|
||||
}
|
||||
|
||||
pub fn apply_changes(
|
||||
&mut self,
|
||||
changes: Vec<lsp_types::TextDocumentContentChangeEvent>,
|
||||
new_version: DocumentVersion,
|
||||
encoding: PositionEncoding,
|
||||
) {
|
||||
if let [lsp_types::TextDocumentContentChangeEvent {
|
||||
range: None, text, ..
|
||||
}] = changes.as_slice()
|
||||
{
|
||||
tracing::debug!("Fast path - replacing entire document");
|
||||
self.modify(|contents, version| {
|
||||
contents.clone_from(text);
|
||||
*version = new_version;
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
let mut new_contents = self.contents().to_string();
|
||||
let mut active_index = self.index().clone();
|
||||
|
||||
for TextDocumentContentChangeEvent {
|
||||
range,
|
||||
text: change,
|
||||
..
|
||||
} in changes
|
||||
{
|
||||
if let Some(range) = range {
|
||||
let range = range.to_text_range(&new_contents, &active_index, encoding);
|
||||
|
||||
new_contents.replace_range(
|
||||
usize::from(range.start())..usize::from(range.end()),
|
||||
&change,
|
||||
);
|
||||
} else {
|
||||
new_contents = change;
|
||||
}
|
||||
|
||||
active_index = LineIndex::from_source_text(&new_contents);
|
||||
}
|
||||
|
||||
self.modify_with_manual_index(|contents, version, index| {
|
||||
*index = active_index;
|
||||
*contents = new_contents;
|
||||
*version = new_version;
|
||||
});
|
||||
}
|
||||
|
||||
pub fn update_version(&mut self, new_version: DocumentVersion) {
|
||||
self.modify_with_manual_index(|_, version, _| {
|
||||
*version = new_version;
|
||||
});
|
||||
}
|
||||
|
||||
// A private function for modifying the document's internal state
|
||||
fn modify(&mut self, func: impl FnOnce(&mut String, &mut DocumentVersion)) {
|
||||
self.modify_with_manual_index(|c, v, i| {
|
||||
func(c, v);
|
||||
*i = LineIndex::from_source_text(c);
|
||||
});
|
||||
}
|
||||
|
||||
// A private function for overriding how we update the line index by default.
|
||||
fn modify_with_manual_index(
|
||||
&mut self,
|
||||
func: impl FnOnce(&mut String, &mut DocumentVersion, &mut LineIndex),
|
||||
) {
|
||||
let old_version = self.version;
|
||||
func(&mut self.contents, &mut self.version, &mut self.index);
|
||||
debug_assert!(self.version >= old_version);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{PositionEncoding, TextDocument};
|
||||
use lsp_types::{Position, TextDocumentContentChangeEvent};
|
||||
|
||||
#[test]
|
||||
fn redo_edit() {
|
||||
let mut document = TextDocument::new(
|
||||
r#""""
|
||||
测试comment
|
||||
一些测试内容
|
||||
"""
|
||||
import click
|
||||
|
||||
|
||||
@click.group()
|
||||
def interface():
|
||||
pas
|
||||
"#
|
||||
.to_string(),
|
||||
0,
|
||||
);
|
||||
|
||||
// Add an `s`, remove it again (back to the original code), and then re-add the `s`
|
||||
document.apply_changes(
|
||||
vec![
|
||||
TextDocumentContentChangeEvent {
|
||||
range: Some(lsp_types::Range::new(
|
||||
Position::new(9, 7),
|
||||
Position::new(9, 7),
|
||||
)),
|
||||
range_length: Some(0),
|
||||
text: "s".to_string(),
|
||||
},
|
||||
TextDocumentContentChangeEvent {
|
||||
range: Some(lsp_types::Range::new(
|
||||
Position::new(9, 7),
|
||||
Position::new(9, 8),
|
||||
)),
|
||||
range_length: Some(1),
|
||||
text: String::new(),
|
||||
},
|
||||
TextDocumentContentChangeEvent {
|
||||
range: Some(lsp_types::Range::new(
|
||||
Position::new(9, 7),
|
||||
Position::new(9, 7),
|
||||
)),
|
||||
range_length: Some(0),
|
||||
text: "s".to_string(),
|
||||
},
|
||||
],
|
||||
1,
|
||||
PositionEncoding::UTF16,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
&document.contents,
|
||||
r#""""
|
||||
测试comment
|
||||
一些测试内容
|
||||
"""
|
||||
import click
|
||||
|
||||
|
||||
@click.group()
|
||||
def interface():
|
||||
pass
|
||||
"#
|
||||
);
|
||||
}
|
||||
}
|
40
crates/ty_server/src/lib.rs
Normal file
40
crates/ty_server/src/lib.rs
Normal file
|
@ -0,0 +1,40 @@
|
|||
use crate::server::Server;
|
||||
use anyhow::Context;
|
||||
pub use document::{DocumentKey, NotebookDocument, PositionEncoding, TextDocument};
|
||||
pub use session::{ClientSettings, DocumentQuery, DocumentSnapshot, Session};
|
||||
use std::num::NonZeroUsize;
|
||||
|
||||
#[macro_use]
|
||||
mod message;
|
||||
|
||||
mod document;
|
||||
mod logging;
|
||||
mod server;
|
||||
mod session;
|
||||
mod system;
|
||||
|
||||
pub(crate) const SERVER_NAME: &str = "ty";
|
||||
pub(crate) const DIAGNOSTIC_NAME: &str = "ty";
|
||||
|
||||
/// A common result type used in most cases where a
|
||||
/// result type is needed.
|
||||
pub(crate) type Result<T> = anyhow::Result<T>;
|
||||
|
||||
pub(crate) fn version() -> &'static str {
|
||||
env!("CARGO_PKG_VERSION")
|
||||
}
|
||||
|
||||
pub fn run_server() -> anyhow::Result<()> {
|
||||
let four = NonZeroUsize::new(4).unwrap();
|
||||
|
||||
// by default, we set the number of worker threads to `num_cpus`, with a maximum of 4.
|
||||
let worker_threads = std::thread::available_parallelism()
|
||||
.unwrap_or(four)
|
||||
.min(four);
|
||||
|
||||
Server::new(worker_threads)
|
||||
.context("Failed to start server")?
|
||||
.run()?;
|
||||
|
||||
Ok(())
|
||||
}
|
114
crates/ty_server/src/logging.rs
Normal file
114
crates/ty_server/src/logging.rs
Normal file
|
@ -0,0 +1,114 @@
|
|||
//! The logging system for `ty server`.
|
||||
//!
|
||||
//! Log messages are controlled by the `logLevel` setting which defaults to `"info"`. Log messages
|
||||
//! are written to `stderr` by default, which should appear in the logs for most LSP clients. A
|
||||
//! `logFile` path can also be specified in the settings, and output will be directed there
|
||||
//! instead.
|
||||
use core::str;
|
||||
use serde::Deserialize;
|
||||
use std::{path::PathBuf, str::FromStr, sync::Arc};
|
||||
use tracing::level_filters::LevelFilter;
|
||||
use tracing_subscriber::{
|
||||
fmt::{time::Uptime, writer::BoxMakeWriter},
|
||||
layer::SubscriberExt,
|
||||
Layer,
|
||||
};
|
||||
|
||||
pub(crate) fn init_logging(log_level: LogLevel, log_file: Option<&std::path::Path>) {
|
||||
let log_file = log_file
|
||||
.map(|path| {
|
||||
// this expands `logFile` so that tildes and environment variables
|
||||
// are replaced with their values, if possible.
|
||||
if let Some(expanded) = shellexpand::full(&path.to_string_lossy())
|
||||
.ok()
|
||||
.and_then(|path| PathBuf::from_str(&path).ok())
|
||||
{
|
||||
expanded
|
||||
} else {
|
||||
path.to_path_buf()
|
||||
}
|
||||
})
|
||||
.and_then(|path| {
|
||||
std::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&path)
|
||||
.map_err(|err| {
|
||||
#[allow(clippy::print_stderr)]
|
||||
{
|
||||
eprintln!(
|
||||
"Failed to open file at {} for logging: {err}",
|
||||
path.display()
|
||||
);
|
||||
}
|
||||
})
|
||||
.ok()
|
||||
});
|
||||
|
||||
let logger = match log_file {
|
||||
Some(file) => BoxMakeWriter::new(Arc::new(file)),
|
||||
None => BoxMakeWriter::new(std::io::stderr),
|
||||
};
|
||||
let subscriber = tracing_subscriber::Registry::default().with(
|
||||
tracing_subscriber::fmt::layer()
|
||||
.with_timer(Uptime::default())
|
||||
.with_thread_names(true)
|
||||
.with_ansi(false)
|
||||
.with_writer(logger)
|
||||
.with_filter(LogLevelFilter { filter: log_level }),
|
||||
);
|
||||
|
||||
tracing::subscriber::set_global_default(subscriber)
|
||||
.expect("should be able to set global default subscriber");
|
||||
}
|
||||
|
||||
/// The log level for the server as provided by the client during initialization.
|
||||
///
|
||||
/// The default log level is `info`.
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub(crate) enum LogLevel {
|
||||
Error,
|
||||
Warn,
|
||||
#[default]
|
||||
Info,
|
||||
Debug,
|
||||
Trace,
|
||||
}
|
||||
|
||||
impl LogLevel {
|
||||
fn trace_level(self) -> tracing::Level {
|
||||
match self {
|
||||
Self::Error => tracing::Level::ERROR,
|
||||
Self::Warn => tracing::Level::WARN,
|
||||
Self::Info => tracing::Level::INFO,
|
||||
Self::Debug => tracing::Level::DEBUG,
|
||||
Self::Trace => tracing::Level::TRACE,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Filters out traces which have a log level lower than the `logLevel` set by the client.
|
||||
struct LogLevelFilter {
|
||||
filter: LogLevel,
|
||||
}
|
||||
|
||||
impl<S> tracing_subscriber::layer::Filter<S> for LogLevelFilter {
|
||||
fn enabled(
|
||||
&self,
|
||||
meta: &tracing::Metadata<'_>,
|
||||
_: &tracing_subscriber::layer::Context<'_, S>,
|
||||
) -> bool {
|
||||
let filter = if meta.target().starts_with("ty") {
|
||||
self.filter.trace_level()
|
||||
} else {
|
||||
tracing::Level::WARN
|
||||
};
|
||||
|
||||
meta.level() <= &filter
|
||||
}
|
||||
|
||||
fn max_level_hint(&self) -> Option<tracing::level_filters::LevelFilter> {
|
||||
Some(LevelFilter::from_level(self.filter.trace_level()))
|
||||
}
|
||||
}
|
46
crates/ty_server/src/message.rs
Normal file
46
crates/ty_server/src/message.rs
Normal file
|
@ -0,0 +1,46 @@
|
|||
use anyhow::Context;
|
||||
use lsp_types::notification::Notification;
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use crate::server::ClientSender;
|
||||
|
||||
static MESSENGER: OnceLock<ClientSender> = OnceLock::new();
|
||||
|
||||
pub(crate) fn init_messenger(client_sender: ClientSender) {
|
||||
MESSENGER
|
||||
.set(client_sender)
|
||||
.expect("messenger should only be initialized once");
|
||||
}
|
||||
|
||||
pub(crate) fn show_message(message: String, message_type: lsp_types::MessageType) {
|
||||
try_show_message(message, message_type).unwrap();
|
||||
}
|
||||
|
||||
pub(super) fn try_show_message(
|
||||
message: String,
|
||||
message_type: lsp_types::MessageType,
|
||||
) -> crate::Result<()> {
|
||||
MESSENGER
|
||||
.get()
|
||||
.ok_or_else(|| anyhow::anyhow!("messenger not initialized"))?
|
||||
.send(lsp_server::Message::Notification(
|
||||
lsp_server::Notification {
|
||||
method: lsp_types::notification::ShowMessage::METHOD.into(),
|
||||
params: serde_json::to_value(lsp_types::ShowMessageParams {
|
||||
typ: message_type,
|
||||
message,
|
||||
})?,
|
||||
},
|
||||
))
|
||||
.context("Failed to send message")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sends an error to the client with a formatted message. The error is sent in a
|
||||
/// `window/showMessage` notification.
|
||||
macro_rules! show_err_msg {
|
||||
($msg:expr$(, $($arg:tt)*)?) => {
|
||||
crate::message::show_message(::core::format_args!($msg$(, $($arg)*)?).to_string(), lsp_types::MessageType::ERROR)
|
||||
};
|
||||
}
|
236
crates/ty_server/src/server.rs
Normal file
236
crates/ty_server/src/server.rs
Normal file
|
@ -0,0 +1,236 @@
|
|||
//! Scheduling, I/O, and API endpoints.
|
||||
|
||||
use std::num::NonZeroUsize;
|
||||
// The new PanicInfoHook name requires MSRV >= 1.82
|
||||
#[allow(deprecated)]
|
||||
use std::panic::PanicInfo;
|
||||
|
||||
use lsp_server::Message;
|
||||
use lsp_types::{
|
||||
ClientCapabilities, DiagnosticOptions, DiagnosticServerCapabilities, HoverProviderCapability,
|
||||
InlayHintOptions, InlayHintServerCapabilities, MessageType, ServerCapabilities,
|
||||
TextDocumentSyncCapability, TextDocumentSyncKind, TextDocumentSyncOptions,
|
||||
TypeDefinitionProviderCapability, Url,
|
||||
};
|
||||
|
||||
use self::connection::{Connection, ConnectionInitializer};
|
||||
use self::schedule::event_loop_thread;
|
||||
use crate::session::{AllSettings, ClientSettings, Session};
|
||||
use crate::PositionEncoding;
|
||||
|
||||
mod api;
|
||||
mod client;
|
||||
mod connection;
|
||||
mod schedule;
|
||||
|
||||
use crate::message::try_show_message;
|
||||
pub(crate) use connection::ClientSender;
|
||||
|
||||
pub(crate) type Result<T> = std::result::Result<T, api::Error>;
|
||||
|
||||
pub(crate) struct Server {
|
||||
connection: Connection,
|
||||
client_capabilities: ClientCapabilities,
|
||||
worker_threads: NonZeroUsize,
|
||||
session: Session,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub(crate) fn new(worker_threads: NonZeroUsize) -> crate::Result<Self> {
|
||||
let connection = ConnectionInitializer::stdio();
|
||||
|
||||
let (id, init_params) = connection.initialize_start()?;
|
||||
|
||||
let client_capabilities = init_params.capabilities;
|
||||
let position_encoding = Self::find_best_position_encoding(&client_capabilities);
|
||||
let server_capabilities = Self::server_capabilities(position_encoding);
|
||||
|
||||
let connection = connection.initialize_finish(
|
||||
id,
|
||||
&server_capabilities,
|
||||
crate::SERVER_NAME,
|
||||
crate::version(),
|
||||
)?;
|
||||
|
||||
crate::message::init_messenger(connection.make_sender());
|
||||
|
||||
let AllSettings {
|
||||
global_settings,
|
||||
mut workspace_settings,
|
||||
} = AllSettings::from_value(
|
||||
init_params
|
||||
.initialization_options
|
||||
.unwrap_or_else(|| serde_json::Value::Object(serde_json::Map::default())),
|
||||
);
|
||||
|
||||
crate::logging::init_logging(
|
||||
global_settings.tracing.log_level.unwrap_or_default(),
|
||||
global_settings.tracing.log_file.as_deref(),
|
||||
);
|
||||
|
||||
let mut workspace_for_url = |url: Url| {
|
||||
let Some(workspace_settings) = workspace_settings.as_mut() else {
|
||||
return (url, ClientSettings::default());
|
||||
};
|
||||
let settings = workspace_settings.remove(&url).unwrap_or_else(|| {
|
||||
tracing::warn!("No workspace settings found for {}", url);
|
||||
ClientSettings::default()
|
||||
});
|
||||
(url, settings)
|
||||
};
|
||||
|
||||
let workspaces = init_params
|
||||
.workspace_folders
|
||||
.filter(|folders| !folders.is_empty())
|
||||
.map(|folders| folders.into_iter().map(|folder| {
|
||||
workspace_for_url(folder.uri)
|
||||
}).collect())
|
||||
.or_else(|| {
|
||||
tracing::warn!("No workspace(s) were provided during initialization. Using the current working directory as a default workspace...");
|
||||
let uri = Url::from_file_path(std::env::current_dir().ok()?).ok()?;
|
||||
Some(vec![workspace_for_url(uri)])
|
||||
})
|
||||
.ok_or_else(|| {
|
||||
anyhow::anyhow!("Failed to get the current working directory while creating a default workspace.")
|
||||
})?;
|
||||
|
||||
if workspaces.len() > 1 {
|
||||
// TODO(dhruvmanila): Support multi-root workspaces
|
||||
anyhow::bail!("Multi-root workspaces are not supported yet");
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
connection,
|
||||
worker_threads,
|
||||
session: Session::new(
|
||||
&client_capabilities,
|
||||
position_encoding,
|
||||
global_settings,
|
||||
&workspaces,
|
||||
)?,
|
||||
client_capabilities,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn run(self) -> crate::Result<()> {
|
||||
// The new PanicInfoHook name requires MSRV >= 1.82
|
||||
#[allow(deprecated)]
|
||||
type PanicHook = Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>;
|
||||
struct RestorePanicHook {
|
||||
hook: Option<PanicHook>,
|
||||
}
|
||||
|
||||
impl Drop for RestorePanicHook {
|
||||
fn drop(&mut self) {
|
||||
if let Some(hook) = self.hook.take() {
|
||||
std::panic::set_hook(hook);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unregister any previously registered panic hook
|
||||
// The hook will be restored when this function exits.
|
||||
let _ = RestorePanicHook {
|
||||
hook: Some(std::panic::take_hook()),
|
||||
};
|
||||
|
||||
// When we panic, try to notify the client.
|
||||
std::panic::set_hook(Box::new(move |panic_info| {
|
||||
use std::io::Write;
|
||||
|
||||
let backtrace = std::backtrace::Backtrace::force_capture();
|
||||
tracing::error!("{panic_info}\n{backtrace}");
|
||||
|
||||
// we also need to print to stderr directly for when using `$logTrace` because
|
||||
// the message won't be sent to the client.
|
||||
// But don't use `eprintln` because `eprintln` itself may panic if the pipe is broken.
|
||||
let mut stderr = std::io::stderr().lock();
|
||||
writeln!(stderr, "{panic_info}\n{backtrace}").ok();
|
||||
|
||||
try_show_message(
|
||||
"The Ruff language server exited with a panic. See the logs for more details."
|
||||
.to_string(),
|
||||
MessageType::ERROR,
|
||||
)
|
||||
.ok();
|
||||
}));
|
||||
|
||||
event_loop_thread(move || {
|
||||
Self::event_loop(
|
||||
&self.connection,
|
||||
&self.client_capabilities,
|
||||
self.session,
|
||||
self.worker_threads,
|
||||
)?;
|
||||
self.connection.close()?;
|
||||
Ok(())
|
||||
})?
|
||||
.join()
|
||||
}
|
||||
|
||||
#[allow(clippy::needless_pass_by_value)] // this is because we aren't using `next_request_id` yet.
|
||||
fn event_loop(
|
||||
connection: &Connection,
|
||||
_client_capabilities: &ClientCapabilities,
|
||||
mut session: Session,
|
||||
worker_threads: NonZeroUsize,
|
||||
) -> crate::Result<()> {
|
||||
let mut scheduler =
|
||||
schedule::Scheduler::new(&mut session, worker_threads, connection.make_sender());
|
||||
|
||||
for msg in connection.incoming() {
|
||||
if connection.handle_shutdown(&msg)? {
|
||||
break;
|
||||
}
|
||||
let task = match msg {
|
||||
Message::Request(req) => api::request(req),
|
||||
Message::Notification(notification) => api::notification(notification),
|
||||
Message::Response(response) => scheduler.response(response),
|
||||
};
|
||||
scheduler.dispatch(task);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_best_position_encoding(client_capabilities: &ClientCapabilities) -> PositionEncoding {
|
||||
client_capabilities
|
||||
.general
|
||||
.as_ref()
|
||||
.and_then(|general_capabilities| general_capabilities.position_encodings.as_ref())
|
||||
.and_then(|encodings| {
|
||||
encodings
|
||||
.iter()
|
||||
.filter_map(|encoding| PositionEncoding::try_from(encoding).ok())
|
||||
.max() // this selects the highest priority position encoding
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn server_capabilities(position_encoding: PositionEncoding) -> ServerCapabilities {
|
||||
ServerCapabilities {
|
||||
position_encoding: Some(position_encoding.into()),
|
||||
diagnostic_provider: Some(DiagnosticServerCapabilities::Options(DiagnosticOptions {
|
||||
identifier: Some(crate::DIAGNOSTIC_NAME.into()),
|
||||
inter_file_dependencies: true,
|
||||
..Default::default()
|
||||
})),
|
||||
text_document_sync: Some(TextDocumentSyncCapability::Options(
|
||||
TextDocumentSyncOptions {
|
||||
open_close: Some(true),
|
||||
change: Some(TextDocumentSyncKind::INCREMENTAL),
|
||||
..Default::default()
|
||||
},
|
||||
)),
|
||||
type_definition_provider: Some(TypeDefinitionProviderCapability::Simple(true)),
|
||||
hover_provider: Some(HoverProviderCapability::Simple(true)),
|
||||
inlay_hint_provider: Some(lsp_types::OneOf::Right(
|
||||
InlayHintServerCapabilities::Options(InlayHintOptions::default()),
|
||||
)),
|
||||
completion_provider: Some(lsp_types::CompletionOptions {
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
274
crates/ty_server/src/server/api.rs
Normal file
274
crates/ty_server/src/server/api.rs
Normal file
|
@ -0,0 +1,274 @@
|
|||
use crate::server::schedule::Task;
|
||||
use crate::session::Session;
|
||||
use crate::system::{url_to_any_system_path, AnySystemPath};
|
||||
use lsp_server as server;
|
||||
use lsp_types::notification::Notification;
|
||||
|
||||
mod diagnostics;
|
||||
mod notifications;
|
||||
mod requests;
|
||||
mod traits;
|
||||
|
||||
use notifications as notification;
|
||||
use requests as request;
|
||||
|
||||
use self::traits::{NotificationHandler, RequestHandler};
|
||||
|
||||
use super::{client::Responder, schedule::BackgroundSchedule, Result};
|
||||
|
||||
pub(super) fn request<'a>(req: server::Request) -> Task<'a> {
|
||||
let id = req.id.clone();
|
||||
|
||||
match req.method.as_str() {
|
||||
request::DocumentDiagnosticRequestHandler::METHOD => background_request_task::<
|
||||
request::DocumentDiagnosticRequestHandler,
|
||||
>(
|
||||
req, BackgroundSchedule::Worker
|
||||
),
|
||||
request::GotoTypeDefinitionRequestHandler::METHOD => background_request_task::<
|
||||
request::GotoTypeDefinitionRequestHandler,
|
||||
>(
|
||||
req, BackgroundSchedule::Worker
|
||||
),
|
||||
request::HoverRequestHandler::METHOD => {
|
||||
background_request_task::<request::HoverRequestHandler>(req, BackgroundSchedule::Worker)
|
||||
}
|
||||
request::InlayHintRequestHandler::METHOD => background_request_task::<
|
||||
request::InlayHintRequestHandler,
|
||||
>(req, BackgroundSchedule::Worker),
|
||||
request::CompletionRequestHandler::METHOD => background_request_task::<
|
||||
request::CompletionRequestHandler,
|
||||
>(
|
||||
req, BackgroundSchedule::LatencySensitive
|
||||
),
|
||||
|
||||
method => {
|
||||
tracing::warn!("Received request {method} which does not have a handler");
|
||||
return Task::nothing();
|
||||
}
|
||||
}
|
||||
.unwrap_or_else(|err| {
|
||||
tracing::error!("Encountered error when routing request with ID {id}: {err}");
|
||||
show_err_msg!(
|
||||
"Ruff failed to handle a request from the editor. Check the logs for more details."
|
||||
);
|
||||
let result: Result<()> = Err(err);
|
||||
Task::immediate(id, result)
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn notification<'a>(notif: server::Notification) -> Task<'a> {
|
||||
match notif.method.as_str() {
|
||||
notification::DidCloseTextDocumentHandler::METHOD => local_notification_task::<notification::DidCloseTextDocumentHandler>(notif),
|
||||
notification::DidOpenTextDocumentHandler::METHOD => local_notification_task::<notification::DidOpenTextDocumentHandler>(notif),
|
||||
notification::DidChangeTextDocumentHandler::METHOD => local_notification_task::<notification::DidChangeTextDocumentHandler>(notif),
|
||||
notification::DidOpenNotebookHandler::METHOD => {
|
||||
local_notification_task::<notification::DidOpenNotebookHandler>(notif)
|
||||
}
|
||||
notification::DidCloseNotebookHandler::METHOD => {
|
||||
local_notification_task::<notification::DidCloseNotebookHandler>(notif)
|
||||
}
|
||||
lsp_types::notification::SetTrace::METHOD => {
|
||||
tracing::trace!("Ignoring `setTrace` notification");
|
||||
return Task::nothing();
|
||||
},
|
||||
|
||||
method => {
|
||||
tracing::warn!("Received notification {method} which does not have a handler.");
|
||||
return Task::nothing();
|
||||
}
|
||||
}
|
||||
.unwrap_or_else(|err| {
|
||||
tracing::error!("Encountered error when routing notification: {err}");
|
||||
show_err_msg!("Ruff failed to handle a notification from the editor. Check the logs for more details.");
|
||||
Task::nothing()
|
||||
})
|
||||
}
|
||||
|
||||
fn _local_request_task<'a, R: traits::SyncRequestHandler>(
|
||||
req: server::Request,
|
||||
) -> super::Result<Task<'a>> {
|
||||
let (id, params) = cast_request::<R>(req)?;
|
||||
Ok(Task::local(|session, notifier, requester, responder| {
|
||||
let _span = tracing::trace_span!("request", %id, method = R::METHOD).entered();
|
||||
let result = R::run(session, notifier, requester, params);
|
||||
respond::<R>(id, result, &responder);
|
||||
}))
|
||||
}
|
||||
|
||||
// TODO(micha): Calls to `db` could panic if the db gets mutated while this task is running.
|
||||
// We should either wrap `R::run_with_snapshot` with a salsa catch cancellation handler or
|
||||
// use `SemanticModel` instead of passing `db` which uses a Result for all it's methods
|
||||
// that propagate cancellations.
|
||||
fn background_request_task<'a, R: traits::BackgroundDocumentRequestHandler>(
|
||||
req: server::Request,
|
||||
schedule: BackgroundSchedule,
|
||||
) -> super::Result<Task<'a>> {
|
||||
let (id, params) = cast_request::<R>(req)?;
|
||||
Ok(Task::background(schedule, move |session: &Session| {
|
||||
let url = R::document_url(¶ms).into_owned();
|
||||
|
||||
let Ok(path) = url_to_any_system_path(&url) else {
|
||||
return Box::new(|_, _| {});
|
||||
};
|
||||
let db = match path {
|
||||
AnySystemPath::System(path) => match session.project_db_for_path(path.as_std_path()) {
|
||||
Some(db) => db.clone(),
|
||||
None => session.default_project_db().clone(),
|
||||
},
|
||||
AnySystemPath::SystemVirtual(_) => session.default_project_db().clone(),
|
||||
};
|
||||
|
||||
let Some(snapshot) = session.take_snapshot(url) else {
|
||||
return Box::new(|_, _| {});
|
||||
};
|
||||
|
||||
Box::new(move |notifier, responder| {
|
||||
let _span = tracing::trace_span!("request", %id, method = R::METHOD).entered();
|
||||
let result = R::run_with_snapshot(snapshot, db, notifier, params);
|
||||
respond::<R>(id, result, &responder);
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
fn local_notification_task<'a, N: traits::SyncNotificationHandler>(
|
||||
notif: server::Notification,
|
||||
) -> super::Result<Task<'a>> {
|
||||
let (id, params) = cast_notification::<N>(notif)?;
|
||||
Ok(Task::local(move |session, notifier, requester, _| {
|
||||
let _span = tracing::trace_span!("notification", method = N::METHOD).entered();
|
||||
if let Err(err) = N::run(session, notifier, requester, params) {
|
||||
tracing::error!("An error occurred while running {id}: {err}");
|
||||
show_err_msg!("Ruff encountered a problem. Check the logs for more details.");
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn background_notification_thread<'a, N: traits::BackgroundDocumentNotificationHandler>(
|
||||
req: server::Notification,
|
||||
schedule: BackgroundSchedule,
|
||||
) -> super::Result<Task<'a>> {
|
||||
let (id, params) = cast_notification::<N>(req)?;
|
||||
Ok(Task::background(schedule, move |session: &Session| {
|
||||
// TODO(jane): we should log an error if we can't take a snapshot.
|
||||
let Some(snapshot) = session.take_snapshot(N::document_url(¶ms).into_owned()) else {
|
||||
return Box::new(|_, _| {});
|
||||
};
|
||||
Box::new(move |notifier, _| {
|
||||
let _span = tracing::trace_span!("notification", method = N::METHOD).entered();
|
||||
if let Err(err) = N::run_with_snapshot(snapshot, notifier, params) {
|
||||
tracing::error!("An error occurred while running {id}: {err}");
|
||||
show_err_msg!("Ruff encountered a problem. Check the logs for more details.");
|
||||
}
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
/// Tries to cast a serialized request from the server into
|
||||
/// a parameter type for a specific request handler.
|
||||
/// It is *highly* recommended to not override this function in your
|
||||
/// implementation.
|
||||
fn cast_request<Req>(
|
||||
request: server::Request,
|
||||
) -> super::Result<(
|
||||
server::RequestId,
|
||||
<<Req as RequestHandler>::RequestType as lsp_types::request::Request>::Params,
|
||||
)>
|
||||
where
|
||||
Req: traits::RequestHandler,
|
||||
{
|
||||
request
|
||||
.extract(Req::METHOD)
|
||||
.map_err(|err| match err {
|
||||
json_err @ server::ExtractError::JsonError { .. } => {
|
||||
anyhow::anyhow!("JSON parsing failure:\n{json_err}")
|
||||
}
|
||||
server::ExtractError::MethodMismatch(_) => {
|
||||
unreachable!("A method mismatch should not be possible here unless you've used a different handler (`Req`) \
|
||||
than the one whose method name was matched against earlier.")
|
||||
}
|
||||
})
|
||||
.with_failure_code(server::ErrorCode::InternalError)
|
||||
}
|
||||
|
||||
/// Sends back a response to the server using a [`Responder`].
|
||||
fn respond<Req>(
|
||||
id: server::RequestId,
|
||||
result: crate::server::Result<
|
||||
<<Req as traits::RequestHandler>::RequestType as lsp_types::request::Request>::Result,
|
||||
>,
|
||||
responder: &Responder,
|
||||
) where
|
||||
Req: traits::RequestHandler,
|
||||
{
|
||||
if let Err(err) = &result {
|
||||
tracing::error!("An error occurred with request ID {id}: {err}");
|
||||
show_err_msg!("Ruff encountered a problem. Check the logs for more details.");
|
||||
}
|
||||
if let Err(err) = responder.respond(id, result) {
|
||||
tracing::error!("Failed to send response: {err}");
|
||||
}
|
||||
}
|
||||
|
||||
/// Tries to cast a serialized request from the server into
|
||||
/// a parameter type for a specific request handler.
|
||||
fn cast_notification<N>(
|
||||
notification: server::Notification,
|
||||
) -> super::Result<
|
||||
(
|
||||
&'static str,
|
||||
<<N as traits::NotificationHandler>::NotificationType as lsp_types::notification::Notification>::Params,
|
||||
)> where N: traits::NotificationHandler{
|
||||
Ok((
|
||||
N::METHOD,
|
||||
notification
|
||||
.extract(N::METHOD)
|
||||
.map_err(|err| match err {
|
||||
json_err @ server::ExtractError::JsonError { .. } => {
|
||||
anyhow::anyhow!("JSON parsing failure:\n{json_err}")
|
||||
}
|
||||
server::ExtractError::MethodMismatch(_) => {
|
||||
unreachable!("A method mismatch should not be possible here unless you've used a different handler (`N`) \
|
||||
than the one whose method name was matched against earlier.")
|
||||
}
|
||||
})
|
||||
.with_failure_code(server::ErrorCode::InternalError)?,
|
||||
))
|
||||
}
|
||||
|
||||
pub(crate) struct Error {
|
||||
pub(crate) code: server::ErrorCode,
|
||||
pub(crate) error: anyhow::Error,
|
||||
}
|
||||
|
||||
/// A trait to convert result types into the server result type, [`super::Result`].
|
||||
trait LSPResult<T> {
|
||||
fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T>;
|
||||
}
|
||||
|
||||
impl<T, E: Into<anyhow::Error>> LSPResult<T> for core::result::Result<T, E> {
|
||||
fn with_failure_code(self, code: server::ErrorCode) -> super::Result<T> {
|
||||
self.map_err(|err| Error::new(err.into(), code))
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
pub(crate) fn new(err: anyhow::Error, code: server::ErrorCode) -> Self {
|
||||
Self { code, error: err }
|
||||
}
|
||||
}
|
||||
|
||||
// Right now, we treat the error code as invisible data that won't
|
||||
// be printed.
|
||||
impl std::fmt::Debug for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.error.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.error.fmt(f)
|
||||
}
|
||||
}
|
18
crates/ty_server/src/server/api/diagnostics.rs
Normal file
18
crates/ty_server/src/server/api/diagnostics.rs
Normal file
|
@ -0,0 +1,18 @@
|
|||
use lsp_server::ErrorCode;
|
||||
use lsp_types::{notification::PublishDiagnostics, PublishDiagnosticsParams, Url};
|
||||
|
||||
use crate::server::client::Notifier;
|
||||
use crate::server::Result;
|
||||
|
||||
use super::LSPResult;
|
||||
|
||||
pub(super) fn clear_diagnostics(uri: &Url, notifier: &Notifier) -> Result<()> {
|
||||
notifier
|
||||
.notify::<PublishDiagnostics>(PublishDiagnosticsParams {
|
||||
uri: uri.clone(),
|
||||
diagnostics: vec![],
|
||||
version: None,
|
||||
})
|
||||
.with_failure_code(ErrorCode::InternalError)?;
|
||||
Ok(())
|
||||
}
|
11
crates/ty_server/src/server/api/notifications.rs
Normal file
11
crates/ty_server/src/server/api/notifications.rs
Normal file
|
@ -0,0 +1,11 @@
|
|||
mod did_change;
|
||||
mod did_close;
|
||||
mod did_close_notebook;
|
||||
mod did_open;
|
||||
mod did_open_notebook;
|
||||
|
||||
pub(super) use did_change::DidChangeTextDocumentHandler;
|
||||
pub(super) use did_close::DidCloseTextDocumentHandler;
|
||||
pub(super) use did_close_notebook::DidCloseNotebookHandler;
|
||||
pub(super) use did_open::DidOpenTextDocumentHandler;
|
||||
pub(super) use did_open_notebook::DidOpenNotebookHandler;
|
55
crates/ty_server/src/server/api/notifications/did_change.rs
Normal file
55
crates/ty_server/src/server/api/notifications/did_change.rs
Normal file
|
@ -0,0 +1,55 @@
|
|||
use lsp_server::ErrorCode;
|
||||
use lsp_types::notification::DidChangeTextDocument;
|
||||
use lsp_types::DidChangeTextDocumentParams;
|
||||
|
||||
use ty_project::watch::ChangeEvent;
|
||||
|
||||
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||
use crate::server::api::LSPResult;
|
||||
use crate::server::client::{Notifier, Requester};
|
||||
use crate::server::Result;
|
||||
use crate::session::Session;
|
||||
use crate::system::{url_to_any_system_path, AnySystemPath};
|
||||
|
||||
pub(crate) struct DidChangeTextDocumentHandler;
|
||||
|
||||
impl NotificationHandler for DidChangeTextDocumentHandler {
|
||||
type NotificationType = DidChangeTextDocument;
|
||||
}
|
||||
|
||||
impl SyncNotificationHandler for DidChangeTextDocumentHandler {
|
||||
fn run(
|
||||
session: &mut Session,
|
||||
_notifier: Notifier,
|
||||
_requester: &mut Requester,
|
||||
params: DidChangeTextDocumentParams,
|
||||
) -> Result<()> {
|
||||
let Ok(path) = url_to_any_system_path(¶ms.text_document.uri) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let key = session.key_from_url(params.text_document.uri);
|
||||
|
||||
session
|
||||
.update_text_document(&key, params.content_changes, params.text_document.version)
|
||||
.with_failure_code(ErrorCode::InternalError)?;
|
||||
|
||||
match path {
|
||||
AnySystemPath::System(path) => {
|
||||
let db = match session.project_db_for_path_mut(path.as_std_path()) {
|
||||
Some(db) => db,
|
||||
None => session.default_project_db_mut(),
|
||||
};
|
||||
db.apply_changes(vec![ChangeEvent::file_content_changed(path)], None);
|
||||
}
|
||||
AnySystemPath::SystemVirtual(virtual_path) => {
|
||||
let db = session.default_project_db_mut();
|
||||
db.apply_changes(vec![ChangeEvent::ChangedVirtual(virtual_path)], None);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(dhruvmanila): Publish diagnostics if the client doesn't support pull diagnostics
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
45
crates/ty_server/src/server/api/notifications/did_close.rs
Normal file
45
crates/ty_server/src/server/api/notifications/did_close.rs
Normal file
|
@ -0,0 +1,45 @@
|
|||
use lsp_server::ErrorCode;
|
||||
use lsp_types::notification::DidCloseTextDocument;
|
||||
use lsp_types::DidCloseTextDocumentParams;
|
||||
use ty_project::watch::ChangeEvent;
|
||||
|
||||
use crate::server::api::diagnostics::clear_diagnostics;
|
||||
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||
use crate::server::api::LSPResult;
|
||||
use crate::server::client::{Notifier, Requester};
|
||||
use crate::server::Result;
|
||||
use crate::session::Session;
|
||||
use crate::system::{url_to_any_system_path, AnySystemPath};
|
||||
|
||||
pub(crate) struct DidCloseTextDocumentHandler;
|
||||
|
||||
impl NotificationHandler for DidCloseTextDocumentHandler {
|
||||
type NotificationType = DidCloseTextDocument;
|
||||
}
|
||||
|
||||
impl SyncNotificationHandler for DidCloseTextDocumentHandler {
|
||||
fn run(
|
||||
session: &mut Session,
|
||||
notifier: Notifier,
|
||||
_requester: &mut Requester,
|
||||
params: DidCloseTextDocumentParams,
|
||||
) -> Result<()> {
|
||||
let Ok(path) = url_to_any_system_path(¶ms.text_document.uri) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let key = session.key_from_url(params.text_document.uri);
|
||||
session
|
||||
.close_document(&key)
|
||||
.with_failure_code(ErrorCode::InternalError)?;
|
||||
|
||||
if let AnySystemPath::SystemVirtual(virtual_path) = path {
|
||||
let db = session.default_project_db_mut();
|
||||
db.apply_changes(vec![ChangeEvent::DeletedVirtual(virtual_path)], None);
|
||||
}
|
||||
|
||||
clear_diagnostics(key.url(), ¬ifier)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
use lsp_types::notification::DidCloseNotebookDocument;
|
||||
use lsp_types::DidCloseNotebookDocumentParams;
|
||||
|
||||
use ty_project::watch::ChangeEvent;
|
||||
|
||||
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||
use crate::server::api::LSPResult;
|
||||
use crate::server::client::{Notifier, Requester};
|
||||
use crate::server::Result;
|
||||
use crate::session::Session;
|
||||
use crate::system::{url_to_any_system_path, AnySystemPath};
|
||||
|
||||
pub(crate) struct DidCloseNotebookHandler;
|
||||
|
||||
impl NotificationHandler for DidCloseNotebookHandler {
|
||||
type NotificationType = DidCloseNotebookDocument;
|
||||
}
|
||||
|
||||
impl SyncNotificationHandler for DidCloseNotebookHandler {
|
||||
fn run(
|
||||
session: &mut Session,
|
||||
_notifier: Notifier,
|
||||
_requester: &mut Requester,
|
||||
params: DidCloseNotebookDocumentParams,
|
||||
) -> Result<()> {
|
||||
let Ok(path) = url_to_any_system_path(¶ms.notebook_document.uri) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let key = session.key_from_url(params.notebook_document.uri);
|
||||
session
|
||||
.close_document(&key)
|
||||
.with_failure_code(lsp_server::ErrorCode::InternalError)?;
|
||||
|
||||
if let AnySystemPath::SystemVirtual(virtual_path) = path {
|
||||
let db = session.default_project_db_mut();
|
||||
db.apply_changes(vec![ChangeEvent::DeletedVirtual(virtual_path)], None);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
60
crates/ty_server/src/server/api/notifications/did_open.rs
Normal file
60
crates/ty_server/src/server/api/notifications/did_open.rs
Normal file
|
@ -0,0 +1,60 @@
|
|||
use lsp_types::notification::DidOpenTextDocument;
|
||||
use lsp_types::{DidOpenTextDocumentParams, TextDocumentItem};
|
||||
|
||||
use ruff_db::Db;
|
||||
use ty_project::watch::ChangeEvent;
|
||||
|
||||
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||
use crate::server::client::{Notifier, Requester};
|
||||
use crate::server::Result;
|
||||
use crate::session::Session;
|
||||
use crate::system::{url_to_any_system_path, AnySystemPath};
|
||||
use crate::TextDocument;
|
||||
|
||||
pub(crate) struct DidOpenTextDocumentHandler;
|
||||
|
||||
impl NotificationHandler for DidOpenTextDocumentHandler {
|
||||
type NotificationType = DidOpenTextDocument;
|
||||
}
|
||||
|
||||
impl SyncNotificationHandler for DidOpenTextDocumentHandler {
|
||||
fn run(
|
||||
session: &mut Session,
|
||||
_notifier: Notifier,
|
||||
_requester: &mut Requester,
|
||||
DidOpenTextDocumentParams {
|
||||
text_document:
|
||||
TextDocumentItem {
|
||||
uri,
|
||||
text,
|
||||
version,
|
||||
language_id,
|
||||
},
|
||||
}: DidOpenTextDocumentParams,
|
||||
) -> Result<()> {
|
||||
let Ok(path) = url_to_any_system_path(&uri) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let document = TextDocument::new(text, version).with_language_id(&language_id);
|
||||
session.open_text_document(uri, document);
|
||||
|
||||
match path {
|
||||
AnySystemPath::System(path) => {
|
||||
let db = match session.project_db_for_path_mut(path.as_std_path()) {
|
||||
Some(db) => db,
|
||||
None => session.default_project_db_mut(),
|
||||
};
|
||||
db.apply_changes(vec![ChangeEvent::Opened(path)], None);
|
||||
}
|
||||
AnySystemPath::SystemVirtual(virtual_path) => {
|
||||
let db = session.default_project_db_mut();
|
||||
db.files().virtual_file(db, &virtual_path);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(dhruvmanila): Publish diagnostics if the client doesn't support pull diagnostics
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
use lsp_server::ErrorCode;
|
||||
use lsp_types::notification::DidOpenNotebookDocument;
|
||||
use lsp_types::DidOpenNotebookDocumentParams;
|
||||
|
||||
use ruff_db::Db;
|
||||
use ty_project::watch::ChangeEvent;
|
||||
|
||||
use crate::document::NotebookDocument;
|
||||
use crate::server::api::traits::{NotificationHandler, SyncNotificationHandler};
|
||||
use crate::server::api::LSPResult;
|
||||
use crate::server::client::{Notifier, Requester};
|
||||
use crate::server::Result;
|
||||
use crate::session::Session;
|
||||
use crate::system::{url_to_any_system_path, AnySystemPath};
|
||||
|
||||
pub(crate) struct DidOpenNotebookHandler;
|
||||
|
||||
impl NotificationHandler for DidOpenNotebookHandler {
|
||||
type NotificationType = DidOpenNotebookDocument;
|
||||
}
|
||||
|
||||
impl SyncNotificationHandler for DidOpenNotebookHandler {
|
||||
fn run(
|
||||
session: &mut Session,
|
||||
_notifier: Notifier,
|
||||
_requester: &mut Requester,
|
||||
params: DidOpenNotebookDocumentParams,
|
||||
) -> Result<()> {
|
||||
let Ok(path) = url_to_any_system_path(¶ms.notebook_document.uri) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let notebook = NotebookDocument::new(
|
||||
params.notebook_document.version,
|
||||
params.notebook_document.cells,
|
||||
params.notebook_document.metadata.unwrap_or_default(),
|
||||
params.cell_text_documents,
|
||||
)
|
||||
.with_failure_code(ErrorCode::InternalError)?;
|
||||
session.open_notebook_document(params.notebook_document.uri, notebook);
|
||||
|
||||
match path {
|
||||
AnySystemPath::System(path) => {
|
||||
let db = match session.project_db_for_path_mut(path.as_std_path()) {
|
||||
Some(db) => db,
|
||||
None => session.default_project_db_mut(),
|
||||
};
|
||||
db.apply_changes(vec![ChangeEvent::Opened(path)], None);
|
||||
}
|
||||
AnySystemPath::SystemVirtual(virtual_path) => {
|
||||
let db = session.default_project_db_mut();
|
||||
db.files().virtual_file(db, &virtual_path);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(dhruvmanila): Publish diagnostics if the client doesn't support pull diagnostics
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
11
crates/ty_server/src/server/api/requests.rs
Normal file
11
crates/ty_server/src/server/api/requests.rs
Normal file
|
@ -0,0 +1,11 @@
|
|||
mod completion;
|
||||
mod diagnostic;
|
||||
mod goto_type_definition;
|
||||
mod hover;
|
||||
mod inlay_hints;
|
||||
|
||||
pub(super) use completion::CompletionRequestHandler;
|
||||
pub(super) use diagnostic::DocumentDiagnosticRequestHandler;
|
||||
pub(super) use goto_type_definition::GotoTypeDefinitionRequestHandler;
|
||||
pub(super) use hover::HoverRequestHandler;
|
||||
pub(super) use inlay_hints::InlayHintRequestHandler;
|
58
crates/ty_server/src/server/api/requests/completion.rs
Normal file
58
crates/ty_server/src/server/api/requests/completion.rs
Normal file
|
@ -0,0 +1,58 @@
|
|||
use std::borrow::Cow;
|
||||
|
||||
use lsp_types::request::Completion;
|
||||
use lsp_types::{CompletionItem, CompletionParams, CompletionResponse, Url};
|
||||
use ruff_db::source::{line_index, source_text};
|
||||
use ty_ide::completion;
|
||||
use ty_project::ProjectDatabase;
|
||||
|
||||
use crate::document::PositionExt;
|
||||
use crate::server::api::traits::{BackgroundDocumentRequestHandler, RequestHandler};
|
||||
use crate::server::client::Notifier;
|
||||
use crate::DocumentSnapshot;
|
||||
|
||||
pub(crate) struct CompletionRequestHandler;
|
||||
|
||||
impl RequestHandler for CompletionRequestHandler {
|
||||
type RequestType = Completion;
|
||||
}
|
||||
|
||||
impl BackgroundDocumentRequestHandler for CompletionRequestHandler {
|
||||
fn document_url(params: &CompletionParams) -> Cow<Url> {
|
||||
Cow::Borrowed(¶ms.text_document_position.text_document.uri)
|
||||
}
|
||||
|
||||
fn run_with_snapshot(
|
||||
snapshot: DocumentSnapshot,
|
||||
db: ProjectDatabase,
|
||||
_notifier: Notifier,
|
||||
params: CompletionParams,
|
||||
) -> crate::server::Result<Option<CompletionResponse>> {
|
||||
let Some(file) = snapshot.file(&db) else {
|
||||
tracing::debug!("Failed to resolve file for {:?}", params);
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let source = source_text(&db, file);
|
||||
let line_index = line_index(&db, file);
|
||||
let offset = params.text_document_position.position.to_text_size(
|
||||
&source,
|
||||
&line_index,
|
||||
snapshot.encoding(),
|
||||
);
|
||||
let completions = completion(&db, file, offset);
|
||||
if completions.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let items: Vec<CompletionItem> = completions
|
||||
.into_iter()
|
||||
.map(|comp| CompletionItem {
|
||||
label: comp.label,
|
||||
..Default::default()
|
||||
})
|
||||
.collect();
|
||||
let response = CompletionResponse::Array(items);
|
||||
Ok(Some(response))
|
||||
}
|
||||
}
|
106
crates/ty_server/src/server/api/requests/diagnostic.rs
Normal file
106
crates/ty_server/src/server/api/requests/diagnostic.rs
Normal file
|
@ -0,0 +1,106 @@
|
|||
use std::borrow::Cow;
|
||||
|
||||
use lsp_types::request::DocumentDiagnosticRequest;
|
||||
use lsp_types::{
|
||||
Diagnostic, DiagnosticSeverity, DocumentDiagnosticParams, DocumentDiagnosticReport,
|
||||
DocumentDiagnosticReportResult, FullDocumentDiagnosticReport, NumberOrString, Range,
|
||||
RelatedFullDocumentDiagnosticReport, Url,
|
||||
};
|
||||
|
||||
use crate::document::ToRangeExt;
|
||||
use crate::server::api::traits::{BackgroundDocumentRequestHandler, RequestHandler};
|
||||
use crate::server::{client::Notifier, Result};
|
||||
use crate::session::DocumentSnapshot;
|
||||
use ruff_db::diagnostic::Severity;
|
||||
use ruff_db::source::{line_index, source_text};
|
||||
use ty_project::{Db, ProjectDatabase};
|
||||
|
||||
pub(crate) struct DocumentDiagnosticRequestHandler;
|
||||
|
||||
impl RequestHandler for DocumentDiagnosticRequestHandler {
|
||||
type RequestType = DocumentDiagnosticRequest;
|
||||
}
|
||||
|
||||
impl BackgroundDocumentRequestHandler for DocumentDiagnosticRequestHandler {
|
||||
fn document_url(params: &DocumentDiagnosticParams) -> Cow<Url> {
|
||||
Cow::Borrowed(¶ms.text_document.uri)
|
||||
}
|
||||
|
||||
fn run_with_snapshot(
|
||||
snapshot: DocumentSnapshot,
|
||||
db: ProjectDatabase,
|
||||
_notifier: Notifier,
|
||||
_params: DocumentDiagnosticParams,
|
||||
) -> Result<DocumentDiagnosticReportResult> {
|
||||
let diagnostics = compute_diagnostics(&snapshot, &db);
|
||||
|
||||
Ok(DocumentDiagnosticReportResult::Report(
|
||||
DocumentDiagnosticReport::Full(RelatedFullDocumentDiagnosticReport {
|
||||
related_documents: None,
|
||||
full_document_diagnostic_report: FullDocumentDiagnosticReport {
|
||||
result_id: None,
|
||||
items: diagnostics,
|
||||
},
|
||||
}),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_diagnostics(snapshot: &DocumentSnapshot, db: &ProjectDatabase) -> Vec<Diagnostic> {
|
||||
let Some(file) = snapshot.file(db) else {
|
||||
tracing::info!(
|
||||
"No file found for snapshot for `{}`",
|
||||
snapshot.query().file_url()
|
||||
);
|
||||
return vec![];
|
||||
};
|
||||
|
||||
let diagnostics = match db.check_file(file) {
|
||||
Ok(diagnostics) => diagnostics,
|
||||
Err(cancelled) => {
|
||||
tracing::info!("Diagnostics computation {cancelled}");
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
|
||||
diagnostics
|
||||
.as_slice()
|
||||
.iter()
|
||||
.map(|message| to_lsp_diagnostic(db, message, snapshot.encoding()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn to_lsp_diagnostic(
|
||||
db: &dyn Db,
|
||||
diagnostic: &ruff_db::diagnostic::Diagnostic,
|
||||
encoding: crate::PositionEncoding,
|
||||
) -> Diagnostic {
|
||||
let range = if let Some(span) = diagnostic.primary_span() {
|
||||
let index = line_index(db.upcast(), span.file());
|
||||
let source = source_text(db.upcast(), span.file());
|
||||
|
||||
span.range()
|
||||
.map(|range| range.to_lsp_range(&source, &index, encoding))
|
||||
.unwrap_or_default()
|
||||
} else {
|
||||
Range::default()
|
||||
};
|
||||
|
||||
let severity = match diagnostic.severity() {
|
||||
Severity::Info => DiagnosticSeverity::INFORMATION,
|
||||
Severity::Warning => DiagnosticSeverity::WARNING,
|
||||
Severity::Error | Severity::Fatal => DiagnosticSeverity::ERROR,
|
||||
};
|
||||
|
||||
Diagnostic {
|
||||
range,
|
||||
severity: Some(severity),
|
||||
tags: None,
|
||||
code: Some(NumberOrString::String(diagnostic.id().to_string())),
|
||||
code_description: None,
|
||||
source: Some("ty".into()),
|
||||
message: diagnostic.concise_message().to_string(),
|
||||
related_information: None,
|
||||
data: None,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
use std::borrow::Cow;
|
||||
|
||||
use lsp_types::request::{GotoTypeDefinition, GotoTypeDefinitionParams};
|
||||
use lsp_types::{GotoDefinitionResponse, Url};
|
||||
use ruff_db::source::{line_index, source_text};
|
||||
use ty_ide::goto_type_definition;
|
||||
use ty_project::ProjectDatabase;
|
||||
|
||||
use crate::document::{PositionExt, ToLink};
|
||||
use crate::server::api::traits::{BackgroundDocumentRequestHandler, RequestHandler};
|
||||
use crate::server::client::Notifier;
|
||||
use crate::DocumentSnapshot;
|
||||
|
||||
pub(crate) struct GotoTypeDefinitionRequestHandler;
|
||||
|
||||
impl RequestHandler for GotoTypeDefinitionRequestHandler {
|
||||
type RequestType = GotoTypeDefinition;
|
||||
}
|
||||
|
||||
impl BackgroundDocumentRequestHandler for GotoTypeDefinitionRequestHandler {
|
||||
fn document_url(params: &GotoTypeDefinitionParams) -> Cow<Url> {
|
||||
Cow::Borrowed(¶ms.text_document_position_params.text_document.uri)
|
||||
}
|
||||
|
||||
fn run_with_snapshot(
|
||||
snapshot: DocumentSnapshot,
|
||||
db: ProjectDatabase,
|
||||
_notifier: Notifier,
|
||||
params: GotoTypeDefinitionParams,
|
||||
) -> crate::server::Result<Option<GotoDefinitionResponse>> {
|
||||
let Some(file) = snapshot.file(&db) else {
|
||||
tracing::debug!("Failed to resolve file for {:?}", params);
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let source = source_text(&db, file);
|
||||
let line_index = line_index(&db, file);
|
||||
let offset = params.text_document_position_params.position.to_text_size(
|
||||
&source,
|
||||
&line_index,
|
||||
snapshot.encoding(),
|
||||
);
|
||||
|
||||
let Some(ranged) = goto_type_definition(&db, file, offset) else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
if snapshot
|
||||
.resolved_client_capabilities()
|
||||
.type_definition_link_support
|
||||
{
|
||||
let src = Some(ranged.range);
|
||||
let links: Vec<_> = ranged
|
||||
.into_iter()
|
||||
.filter_map(|target| target.to_link(&db, src, snapshot.encoding()))
|
||||
.collect();
|
||||
|
||||
Ok(Some(GotoDefinitionResponse::Link(links)))
|
||||
} else {
|
||||
let locations: Vec<_> = ranged
|
||||
.into_iter()
|
||||
.filter_map(|target| target.to_location(&db, snapshot.encoding()))
|
||||
.collect();
|
||||
|
||||
Ok(Some(GotoDefinitionResponse::Array(locations)))
|
||||
}
|
||||
}
|
||||
}
|
71
crates/ty_server/src/server/api/requests/hover.rs
Normal file
71
crates/ty_server/src/server/api/requests/hover.rs
Normal file
|
@ -0,0 +1,71 @@
|
|||
use std::borrow::Cow;
|
||||
|
||||
use crate::document::{PositionExt, ToRangeExt};
|
||||
use crate::server::api::traits::{BackgroundDocumentRequestHandler, RequestHandler};
|
||||
use crate::server::client::Notifier;
|
||||
use crate::DocumentSnapshot;
|
||||
use lsp_types::request::HoverRequest;
|
||||
use lsp_types::{HoverContents, HoverParams, MarkupContent, Url};
|
||||
use ruff_db::source::{line_index, source_text};
|
||||
use ruff_text_size::Ranged;
|
||||
use ty_ide::{hover, MarkupKind};
|
||||
use ty_project::ProjectDatabase;
|
||||
|
||||
pub(crate) struct HoverRequestHandler;
|
||||
|
||||
impl RequestHandler for HoverRequestHandler {
|
||||
type RequestType = HoverRequest;
|
||||
}
|
||||
|
||||
impl BackgroundDocumentRequestHandler for HoverRequestHandler {
|
||||
fn document_url(params: &HoverParams) -> Cow<Url> {
|
||||
Cow::Borrowed(¶ms.text_document_position_params.text_document.uri)
|
||||
}
|
||||
|
||||
fn run_with_snapshot(
|
||||
snapshot: DocumentSnapshot,
|
||||
db: ProjectDatabase,
|
||||
_notifier: Notifier,
|
||||
params: HoverParams,
|
||||
) -> crate::server::Result<Option<lsp_types::Hover>> {
|
||||
let Some(file) = snapshot.file(&db) else {
|
||||
tracing::debug!("Failed to resolve file for {:?}", params);
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let source = source_text(&db, file);
|
||||
let line_index = line_index(&db, file);
|
||||
let offset = params.text_document_position_params.position.to_text_size(
|
||||
&source,
|
||||
&line_index,
|
||||
snapshot.encoding(),
|
||||
);
|
||||
|
||||
let Some(range_info) = hover(&db, file, offset) else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let (markup_kind, lsp_markup_kind) = if snapshot
|
||||
.resolved_client_capabilities()
|
||||
.hover_prefer_markdown
|
||||
{
|
||||
(MarkupKind::Markdown, lsp_types::MarkupKind::Markdown)
|
||||
} else {
|
||||
(MarkupKind::PlainText, lsp_types::MarkupKind::PlainText)
|
||||
};
|
||||
|
||||
let contents = range_info.display(&db, markup_kind).to_string();
|
||||
|
||||
Ok(Some(lsp_types::Hover {
|
||||
contents: HoverContents::Markup(MarkupContent {
|
||||
kind: lsp_markup_kind,
|
||||
value: contents,
|
||||
}),
|
||||
range: Some(range_info.file_range().range().to_lsp_range(
|
||||
&source,
|
||||
&line_index,
|
||||
snapshot.encoding(),
|
||||
)),
|
||||
}))
|
||||
}
|
||||
}
|
62
crates/ty_server/src/server/api/requests/inlay_hints.rs
Normal file
62
crates/ty_server/src/server/api/requests/inlay_hints.rs
Normal file
|
@ -0,0 +1,62 @@
|
|||
use std::borrow::Cow;
|
||||
|
||||
use crate::document::{RangeExt, TextSizeExt};
|
||||
use crate::server::api::traits::{BackgroundDocumentRequestHandler, RequestHandler};
|
||||
use crate::server::client::Notifier;
|
||||
use crate::DocumentSnapshot;
|
||||
use lsp_types::request::InlayHintRequest;
|
||||
use lsp_types::{InlayHintParams, Url};
|
||||
use ruff_db::source::{line_index, source_text};
|
||||
use ty_ide::inlay_hints;
|
||||
use ty_project::ProjectDatabase;
|
||||
|
||||
pub(crate) struct InlayHintRequestHandler;
|
||||
|
||||
impl RequestHandler for InlayHintRequestHandler {
|
||||
type RequestType = InlayHintRequest;
|
||||
}
|
||||
|
||||
impl BackgroundDocumentRequestHandler for InlayHintRequestHandler {
|
||||
fn document_url(params: &InlayHintParams) -> Cow<Url> {
|
||||
Cow::Borrowed(¶ms.text_document.uri)
|
||||
}
|
||||
|
||||
fn run_with_snapshot(
|
||||
snapshot: DocumentSnapshot,
|
||||
db: ProjectDatabase,
|
||||
_notifier: Notifier,
|
||||
params: InlayHintParams,
|
||||
) -> crate::server::Result<Option<Vec<lsp_types::InlayHint>>> {
|
||||
let Some(file) = snapshot.file(&db) else {
|
||||
tracing::debug!("Failed to resolve file for {:?}", params);
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let index = line_index(&db, file);
|
||||
let source = source_text(&db, file);
|
||||
|
||||
let range = params
|
||||
.range
|
||||
.to_text_range(&source, &index, snapshot.encoding());
|
||||
|
||||
let inlay_hints = inlay_hints(&db, file, range);
|
||||
|
||||
let inlay_hints = inlay_hints
|
||||
.into_iter()
|
||||
.map(|hint| lsp_types::InlayHint {
|
||||
position: hint
|
||||
.position
|
||||
.to_position(&source, &index, snapshot.encoding()),
|
||||
label: lsp_types::InlayHintLabel::String(hint.display(&db).to_string()),
|
||||
kind: Some(lsp_types::InlayHintKind::TYPE),
|
||||
tooltip: None,
|
||||
padding_left: None,
|
||||
padding_right: None,
|
||||
data: None,
|
||||
text_edits: None,
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(Some(inlay_hints))
|
||||
}
|
||||
}
|
78
crates/ty_server/src/server/api/traits.rs
Normal file
78
crates/ty_server/src/server/api/traits.rs
Normal file
|
@ -0,0 +1,78 @@
|
|||
//! A stateful LSP implementation that calls into the Ruff API.
|
||||
|
||||
use crate::server::client::{Notifier, Requester};
|
||||
use crate::session::{DocumentSnapshot, Session};
|
||||
|
||||
use lsp_types::notification::Notification as LSPNotification;
|
||||
use lsp_types::request::Request;
|
||||
use ty_project::ProjectDatabase;
|
||||
|
||||
/// A supertrait for any server request handler.
|
||||
pub(super) trait RequestHandler {
|
||||
type RequestType: Request;
|
||||
const METHOD: &'static str = <<Self as RequestHandler>::RequestType as Request>::METHOD;
|
||||
}
|
||||
|
||||
/// A request handler that needs mutable access to the session.
|
||||
/// This will block the main message receiver loop, meaning that no
|
||||
/// incoming requests or notifications will be handled while `run` is
|
||||
/// executing. Try to avoid doing any I/O or long-running computations.
|
||||
#[expect(dead_code)]
|
||||
pub(super) trait SyncRequestHandler: RequestHandler {
|
||||
fn run(
|
||||
session: &mut Session,
|
||||
notifier: Notifier,
|
||||
requester: &mut Requester,
|
||||
params: <<Self as RequestHandler>::RequestType as Request>::Params,
|
||||
) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
|
||||
}
|
||||
|
||||
/// A request handler that can be run on a background thread.
|
||||
pub(super) trait BackgroundDocumentRequestHandler: RequestHandler {
|
||||
fn document_url(
|
||||
params: &<<Self as RequestHandler>::RequestType as Request>::Params,
|
||||
) -> std::borrow::Cow<lsp_types::Url>;
|
||||
|
||||
fn run_with_snapshot(
|
||||
snapshot: DocumentSnapshot,
|
||||
db: ProjectDatabase,
|
||||
notifier: Notifier,
|
||||
params: <<Self as RequestHandler>::RequestType as Request>::Params,
|
||||
) -> super::Result<<<Self as RequestHandler>::RequestType as Request>::Result>;
|
||||
}
|
||||
|
||||
/// A supertrait for any server notification handler.
|
||||
pub(super) trait NotificationHandler {
|
||||
type NotificationType: LSPNotification;
|
||||
const METHOD: &'static str =
|
||||
<<Self as NotificationHandler>::NotificationType as LSPNotification>::METHOD;
|
||||
}
|
||||
|
||||
/// A notification handler that needs mutable access to the session.
|
||||
/// This will block the main message receiver loop, meaning that no
|
||||
/// incoming requests or notifications will be handled while `run` is
|
||||
/// executing. Try to avoid doing any I/O or long-running computations.
|
||||
pub(super) trait SyncNotificationHandler: NotificationHandler {
|
||||
fn run(
|
||||
session: &mut Session,
|
||||
notifier: Notifier,
|
||||
requester: &mut Requester,
|
||||
params: <<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
|
||||
) -> super::Result<()>;
|
||||
}
|
||||
|
||||
/// A notification handler that can be run on a background thread.
|
||||
pub(super) trait BackgroundDocumentNotificationHandler: NotificationHandler {
|
||||
/// `document_url` can be implemented automatically with
|
||||
/// `define_document_url!(params: &<YourParameterType>)` in the trait
|
||||
/// implementation.
|
||||
fn document_url(
|
||||
params: &<<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
|
||||
) -> std::borrow::Cow<lsp_types::Url>;
|
||||
|
||||
fn run_with_snapshot(
|
||||
snapshot: DocumentSnapshot,
|
||||
notifier: Notifier,
|
||||
params: <<Self as NotificationHandler>::NotificationType as LSPNotification>::Params,
|
||||
) -> super::Result<()>;
|
||||
}
|
169
crates/ty_server/src/server/client.rs
Normal file
169
crates/ty_server/src/server/client.rs
Normal file
|
@ -0,0 +1,169 @@
|
|||
use std::any::TypeId;
|
||||
|
||||
use lsp_server::{Notification, RequestId};
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde_json::Value;
|
||||
|
||||
use super::{schedule::Task, ClientSender};
|
||||
|
||||
type ResponseBuilder<'s> = Box<dyn FnOnce(lsp_server::Response) -> Task<'s>>;
|
||||
|
||||
pub(crate) struct Client<'s> {
|
||||
notifier: Notifier,
|
||||
responder: Responder,
|
||||
pub(super) requester: Requester<'s>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct Notifier(ClientSender);
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct Responder(ClientSender);
|
||||
|
||||
pub(crate) struct Requester<'s> {
|
||||
sender: ClientSender,
|
||||
next_request_id: i32,
|
||||
response_handlers: FxHashMap<lsp_server::RequestId, ResponseBuilder<'s>>,
|
||||
}
|
||||
|
||||
impl Client<'_> {
|
||||
pub(super) fn new(sender: ClientSender) -> Self {
|
||||
Self {
|
||||
notifier: Notifier(sender.clone()),
|
||||
responder: Responder(sender.clone()),
|
||||
requester: Requester {
|
||||
sender,
|
||||
next_request_id: 1,
|
||||
response_handlers: FxHashMap::default(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn notifier(&self) -> Notifier {
|
||||
self.notifier.clone()
|
||||
}
|
||||
|
||||
pub(super) fn responder(&self) -> Responder {
|
||||
self.responder.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)] // we'll need to use `Notifier` in the future
|
||||
impl Notifier {
|
||||
pub(crate) fn notify<N>(&self, params: N::Params) -> crate::Result<()>
|
||||
where
|
||||
N: lsp_types::notification::Notification,
|
||||
{
|
||||
let method = N::METHOD.to_string();
|
||||
|
||||
let message = lsp_server::Message::Notification(Notification::new(method, params));
|
||||
|
||||
self.0.send(message)
|
||||
}
|
||||
|
||||
pub(crate) fn notify_method(&self, method: String) -> crate::Result<()> {
|
||||
self.0
|
||||
.send(lsp_server::Message::Notification(Notification::new(
|
||||
method,
|
||||
Value::Null,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
impl Responder {
|
||||
pub(crate) fn respond<R>(
|
||||
&self,
|
||||
id: RequestId,
|
||||
result: crate::server::Result<R>,
|
||||
) -> crate::Result<()>
|
||||
where
|
||||
R: serde::Serialize,
|
||||
{
|
||||
self.0.send(
|
||||
match result {
|
||||
Ok(res) => lsp_server::Response::new_ok(id, res),
|
||||
Err(crate::server::api::Error { code, error }) => {
|
||||
lsp_server::Response::new_err(id, code as i32, format!("{error}"))
|
||||
}
|
||||
}
|
||||
.into(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'s> Requester<'s> {
|
||||
/// Sends a request of kind `R` to the client, with associated parameters.
|
||||
/// The task provided by `response_handler` will be dispatched as soon as the response
|
||||
/// comes back from the client.
|
||||
pub(crate) fn request<R>(
|
||||
&mut self,
|
||||
params: R::Params,
|
||||
response_handler: impl Fn(R::Result) -> Task<'s> + 'static,
|
||||
) -> crate::Result<()>
|
||||
where
|
||||
R: lsp_types::request::Request,
|
||||
{
|
||||
let serialized_params = serde_json::to_value(params)?;
|
||||
|
||||
self.response_handlers.insert(
|
||||
self.next_request_id.into(),
|
||||
Box::new(move |response: lsp_server::Response| {
|
||||
match (response.error, response.result) {
|
||||
(Some(err), _) => {
|
||||
tracing::error!(
|
||||
"Got an error from the client (code {}): {}",
|
||||
err.code,
|
||||
err.message
|
||||
);
|
||||
Task::nothing()
|
||||
}
|
||||
(None, Some(response)) => match serde_json::from_value(response) {
|
||||
Ok(response) => response_handler(response),
|
||||
Err(error) => {
|
||||
tracing::error!("Failed to deserialize response from server: {error}");
|
||||
Task::nothing()
|
||||
}
|
||||
},
|
||||
(None, None) => {
|
||||
if TypeId::of::<R::Result>() == TypeId::of::<()>() {
|
||||
// We can't call `response_handler(())` directly here, but
|
||||
// since we _know_ the type expected is `()`, we can use
|
||||
// `from_value(Value::Null)`. `R::Result` implements `DeserializeOwned`,
|
||||
// so this branch works in the general case but we'll only
|
||||
// hit it if the concrete type is `()`, so the `unwrap()` is safe here.
|
||||
response_handler(serde_json::from_value(Value::Null).unwrap());
|
||||
} else {
|
||||
tracing::error!(
|
||||
"Server response was invalid: did not contain a result or error"
|
||||
);
|
||||
}
|
||||
Task::nothing()
|
||||
}
|
||||
}
|
||||
}),
|
||||
);
|
||||
|
||||
self.sender
|
||||
.send(lsp_server::Message::Request(lsp_server::Request {
|
||||
id: self.next_request_id.into(),
|
||||
method: R::METHOD.into(),
|
||||
params: serialized_params,
|
||||
}))?;
|
||||
|
||||
self.next_request_id += 1;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn pop_response_task(&mut self, response: lsp_server::Response) -> Task<'s> {
|
||||
if let Some(handler) = self.response_handlers.remove(&response.id) {
|
||||
handler(response)
|
||||
} else {
|
||||
tracing::error!(
|
||||
"Received a response with ID {}, which was not expected",
|
||||
response.id
|
||||
);
|
||||
Task::nothing()
|
||||
}
|
||||
}
|
||||
}
|
165
crates/ty_server/src/server/connection.rs
Normal file
165
crates/ty_server/src/server/connection.rs
Normal file
|
@ -0,0 +1,165 @@
|
|||
use lsp_server as lsp;
|
||||
use lsp_types::{notification::Notification, request::Request};
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
type ConnectionSender = crossbeam::channel::Sender<lsp::Message>;
|
||||
type ConnectionReceiver = crossbeam::channel::Receiver<lsp::Message>;
|
||||
|
||||
/// A builder for `Connection` that handles LSP initialization.
|
||||
pub(crate) struct ConnectionInitializer {
|
||||
connection: lsp::Connection,
|
||||
threads: lsp::IoThreads,
|
||||
}
|
||||
|
||||
/// Handles inbound and outbound messages with the client.
|
||||
pub(crate) struct Connection {
|
||||
sender: Arc<ConnectionSender>,
|
||||
receiver: ConnectionReceiver,
|
||||
threads: lsp::IoThreads,
|
||||
}
|
||||
|
||||
impl ConnectionInitializer {
|
||||
/// Create a new LSP server connection over stdin/stdout.
|
||||
pub(super) fn stdio() -> Self {
|
||||
let (connection, threads) = lsp::Connection::stdio();
|
||||
Self {
|
||||
connection,
|
||||
threads,
|
||||
}
|
||||
}
|
||||
|
||||
/// Starts the initialization process with the client by listening for an initialization request.
|
||||
/// Returns a request ID that should be passed into `initialize_finish` later,
|
||||
/// along with the initialization parameters that were provided.
|
||||
pub(super) fn initialize_start(
|
||||
&self,
|
||||
) -> crate::Result<(lsp::RequestId, lsp_types::InitializeParams)> {
|
||||
let (id, params) = self.connection.initialize_start()?;
|
||||
Ok((id, serde_json::from_value(params)?))
|
||||
}
|
||||
|
||||
/// Finishes the initialization process with the client,
|
||||
/// returning an initialized `Connection`.
|
||||
pub(super) fn initialize_finish(
|
||||
self,
|
||||
id: lsp::RequestId,
|
||||
server_capabilities: &lsp_types::ServerCapabilities,
|
||||
name: &str,
|
||||
version: &str,
|
||||
) -> crate::Result<Connection> {
|
||||
self.connection.initialize_finish(
|
||||
id,
|
||||
serde_json::json!({
|
||||
"capabilities": server_capabilities,
|
||||
"serverInfo": {
|
||||
"name": name,
|
||||
"version": version
|
||||
}
|
||||
}),
|
||||
)?;
|
||||
let Self {
|
||||
connection: lsp::Connection { sender, receiver },
|
||||
threads,
|
||||
} = self;
|
||||
Ok(Connection {
|
||||
sender: Arc::new(sender),
|
||||
receiver,
|
||||
threads,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Connection {
|
||||
/// Make a new `ClientSender` for sending messages to the client.
|
||||
pub(super) fn make_sender(&self) -> ClientSender {
|
||||
ClientSender {
|
||||
weak_sender: Arc::downgrade(&self.sender),
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over incoming messages from the client.
|
||||
pub(super) fn incoming(&self) -> crossbeam::channel::Iter<lsp::Message> {
|
||||
self.receiver.iter()
|
||||
}
|
||||
|
||||
/// Check and respond to any incoming shutdown requests; returns`true` if the server should be shutdown.
|
||||
pub(super) fn handle_shutdown(&self, message: &lsp::Message) -> crate::Result<bool> {
|
||||
match message {
|
||||
lsp::Message::Request(lsp::Request { id, method, .. })
|
||||
if method == lsp_types::request::Shutdown::METHOD =>
|
||||
{
|
||||
self.sender
|
||||
.send(lsp::Response::new_ok(id.clone(), ()).into())?;
|
||||
tracing::info!("Shutdown request received. Waiting for an exit notification...");
|
||||
|
||||
loop {
|
||||
match &self
|
||||
.receiver
|
||||
.recv_timeout(std::time::Duration::from_secs(30))?
|
||||
{
|
||||
lsp::Message::Notification(lsp::Notification { method, .. })
|
||||
if method == lsp_types::notification::Exit::METHOD =>
|
||||
{
|
||||
tracing::info!("Exit notification received. Server shutting down...");
|
||||
return Ok(true);
|
||||
}
|
||||
lsp::Message::Request(lsp::Request { id, method, .. }) => {
|
||||
tracing::warn!(
|
||||
"Server received unexpected request {method} ({id}) while waiting for exit notification",
|
||||
);
|
||||
self.sender.send(lsp::Message::Response(lsp::Response::new_err(
|
||||
id.clone(),
|
||||
lsp::ErrorCode::InvalidRequest as i32,
|
||||
"Server received unexpected request while waiting for exit notification".to_string(),
|
||||
)))?;
|
||||
}
|
||||
message => {
|
||||
tracing::warn!(
|
||||
"Server received unexpected message while waiting for exit notification: {message:?}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
lsp::Message::Notification(lsp::Notification { method, .. })
|
||||
if method == lsp_types::notification::Exit::METHOD =>
|
||||
{
|
||||
anyhow::bail!("Server received an exit notification before a shutdown request was sent. Exiting...");
|
||||
}
|
||||
_ => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Join the I/O threads that underpin this connection.
|
||||
/// This is guaranteed to be nearly immediate since
|
||||
/// we close the only active channels to these threads prior
|
||||
/// to joining them.
|
||||
pub(super) fn close(self) -> crate::Result<()> {
|
||||
std::mem::drop(
|
||||
Arc::into_inner(self.sender)
|
||||
.expect("the client sender shouldn't have more than one strong reference"),
|
||||
);
|
||||
std::mem::drop(self.receiver);
|
||||
self.threads.join()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A weak reference to an underlying sender channel, used for communication with the client.
|
||||
/// If the `Connection` that created this `ClientSender` is dropped, any `send` calls will throw
|
||||
/// an error.
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct ClientSender {
|
||||
weak_sender: Weak<ConnectionSender>,
|
||||
}
|
||||
|
||||
// note: additional wrapper functions for senders may be implemented as needed.
|
||||
impl ClientSender {
|
||||
pub(crate) fn send(&self, msg: lsp::Message) -> crate::Result<()> {
|
||||
let Some(sender) = self.weak_sender.upgrade() else {
|
||||
anyhow::bail!("The connection with the client has been closed");
|
||||
};
|
||||
|
||||
Ok(sender.send(msg)?)
|
||||
}
|
||||
}
|
113
crates/ty_server/src/server/schedule.rs
Normal file
113
crates/ty_server/src/server/schedule.rs
Normal file
|
@ -0,0 +1,113 @@
|
|||
use std::num::NonZeroUsize;
|
||||
|
||||
use crate::session::Session;
|
||||
|
||||
mod task;
|
||||
mod thread;
|
||||
|
||||
pub(super) use task::{BackgroundSchedule, Task};
|
||||
|
||||
use self::{
|
||||
task::{BackgroundTaskBuilder, SyncTask},
|
||||
thread::ThreadPriority,
|
||||
};
|
||||
|
||||
use super::{client::Client, ClientSender};
|
||||
|
||||
/// The event loop thread is actually a secondary thread that we spawn from the
|
||||
/// _actual_ main thread. This secondary thread has a larger stack size
|
||||
/// than some OS defaults (Windows, for example) and is also designated as
|
||||
/// high-priority.
|
||||
pub(crate) fn event_loop_thread(
|
||||
func: impl FnOnce() -> crate::Result<()> + Send + 'static,
|
||||
) -> crate::Result<thread::JoinHandle<crate::Result<()>>> {
|
||||
// Override OS defaults to avoid stack overflows on platforms with low stack size defaults.
|
||||
const MAIN_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024;
|
||||
const MAIN_THREAD_NAME: &str = "ruff:main";
|
||||
Ok(
|
||||
thread::Builder::new(thread::ThreadPriority::LatencySensitive)
|
||||
.name(MAIN_THREAD_NAME.into())
|
||||
.stack_size(MAIN_THREAD_STACK_SIZE)
|
||||
.spawn(func)?,
|
||||
)
|
||||
}
|
||||
|
||||
/// Owns the session and the thread pools, and decides where each [`Task`] runs.
pub(crate) struct Scheduler<'s> {
    /// Exclusive handle to the global LSP state; synchronous tasks mutate it directly.
    session: &'s mut Session,
    /// Handle used to communicate with the client (notifications, requests, responses).
    client: Client<'s>,
    /// Dedicated single-thread pool for `BackgroundSchedule::Fmt` tasks.
    fmt_pool: thread::Pool,
    /// General-purpose pool for all other background tasks.
    background_pool: thread::Pool,
}
|
||||
|
||||
impl<'s> Scheduler<'s> {
    /// Creates a scheduler with a single-thread formatting pool and a
    /// `worker_threads`-sized general background pool.
    pub(super) fn new(
        session: &'s mut Session,
        worker_threads: NonZeroUsize,
        sender: ClientSender,
    ) -> Self {
        // The formatting pool is intentionally a single thread.
        const FMT_THREADS: usize = 1;
        Self {
            session,
            fmt_pool: thread::Pool::new(NonZeroUsize::try_from(FMT_THREADS).unwrap()),
            background_pool: thread::Pool::new(worker_threads),
            client: Client::new(sender),
        }
    }

    /// Immediately sends a request of kind `R` to the client, with associated parameters.
    /// The task provided by `response_handler` will be dispatched as soon as the response
    /// comes back from the client.
    #[expect(dead_code)]
    pub(super) fn request<R>(
        &mut self,
        params: R::Params,
        response_handler: impl Fn(R::Result) -> Task<'s> + 'static,
    ) -> crate::Result<()>
    where
        R: lsp_types::request::Request,
    {
        self.client.requester.request::<R>(params, response_handler)
    }

    /// Creates a task to handle a response from the client.
    pub(super) fn response(&mut self, response: lsp_server::Response) -> Task<'s> {
        self.client.requester.pop_response_task(response)
    }

    /// Dispatches a `task` by either running it as a blocking function or
    /// executing it on a background thread pool.
    pub(super) fn dispatch(&mut self, task: task::Task<'s>) {
        match task {
            // Synchronous tasks run right here, on the event loop thread, with
            // mutable access to the session. This blocks the loop until done.
            Task::Sync(SyncTask { func }) => {
                let notifier = self.client.notifier();
                let responder = self.client.responder();
                func(
                    self.session,
                    notifier,
                    &mut self.client.requester,
                    responder,
                );
            }
            // Background tasks: the builder is run *now* (while we still hold
            // the session) to produce a `'static` closure, which is then moved
            // onto the pool selected by `schedule`.
            Task::Background(BackgroundTaskBuilder {
                schedule,
                builder: func,
            }) => {
                let static_func = func(self.session);
                let notifier = self.client.notifier();
                let responder = self.client.responder();
                let task = move || static_func(notifier, responder);
                match schedule {
                    BackgroundSchedule::Worker => {
                        self.background_pool.spawn(ThreadPriority::Worker, task);
                    }
                    BackgroundSchedule::LatencySensitive => self
                        .background_pool
                        .spawn(ThreadPriority::LatencySensitive, task),
                    BackgroundSchedule::Fmt => {
                        self.fmt_pool.spawn(ThreadPriority::LatencySensitive, task);
                    }
                }
            }
        }
    }
}
|
97
crates/ty_server/src/server/schedule/task.rs
Normal file
97
crates/ty_server/src/server/schedule/task.rs
Normal file
|
@ -0,0 +1,97 @@
|
|||
use lsp_server::RequestId;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
server::client::{Notifier, Requester, Responder},
|
||||
session::Session,
|
||||
};
|
||||
|
||||
/// A task body that runs on the event loop thread with exclusive (mutable) access to the session.
type LocalFn<'s> = Box<dyn FnOnce(&mut Session, Notifier, &mut Requester, Responder) + 's>;

/// The `'static` closure that actually executes on a background thread.
type BackgroundFn = Box<dyn FnOnce(Notifier, Responder) + Send + 'static>;

/// Builds a [`BackgroundFn`] from a borrowed session (see the comment on
/// `BackgroundTaskBuilder` in this module for why this two-step shape exists).
type BackgroundFnBuilder<'s> = Box<dyn FnOnce(&Session) -> BackgroundFn + 's>;

/// Describes how the task should be run.
#[derive(Clone, Copy, Debug, Default)]
pub(in crate::server) enum BackgroundSchedule {
    /// The task should be run on the background thread designated
    /// for formatting actions. This is a high priority thread.
    #[expect(dead_code)]
    Fmt,
    /// The task should be run on the general high-priority background
    /// thread. Reserved for actions caused by the user typing (e.g.syntax highlighting).
    LatencySensitive,
    /// The task should be run on a regular-priority background thread.
    /// The default for any request that isn't in the critical path of the user typing.
    #[default]
    Worker,
}
|
||||
|
||||
/// A [`Task`] is a future that has not yet started, and it is the job of
/// the [`super::Scheduler`] to make that happen, via [`super::Scheduler::dispatch`].
/// A task can either run on the main thread (in other words, the same thread as the
/// scheduler) or it can run in a background thread. The main difference between
/// the two is that background threads only have a read-only snapshot of the session,
/// while local tasks have exclusive access and can modify it as they please. Keep in mind that
/// local tasks will **block** the main event loop, so only use local tasks if you **need**
/// mutable state access or you need the absolute lowest latency possible.
pub(in crate::server) enum Task<'s> {
    Background(BackgroundTaskBuilder<'s>),
    Sync(SyncTask<'s>),
}

// The reason why this isn't just a 'static background closure
// is because we need to take a snapshot of the session before sending
// this task to the background, and the inner closure can't take the session
// as an immutable reference since it's used mutably elsewhere. So instead,
// a background task is built using an outer closure that borrows the session to take a snapshot,
// that the inner closure can capture. This builder closure has a lifetime linked to the scheduler.
// When the task is dispatched, the scheduler runs the synchronous builder, which takes the session
// as a reference, to create the inner 'static closure. That closure is then moved to a background task pool.
pub(in crate::server) struct BackgroundTaskBuilder<'s> {
    /// Which pool/priority the built closure should run on.
    pub(super) schedule: BackgroundSchedule,
    /// The synchronous builder that produces the `'static` background closure.
    pub(super) builder: BackgroundFnBuilder<'s>,
}

/// A task that runs synchronously on the scheduler thread.
pub(in crate::server) struct SyncTask<'s> {
    pub(super) func: LocalFn<'s>,
}
|
||||
|
||||
impl<'s> Task<'s> {
|
||||
/// Creates a new background task.
|
||||
pub(crate) fn background(
|
||||
schedule: BackgroundSchedule,
|
||||
func: impl FnOnce(&Session) -> Box<dyn FnOnce(Notifier, Responder) + Send + 'static> + 's,
|
||||
) -> Self {
|
||||
Self::Background(BackgroundTaskBuilder {
|
||||
schedule,
|
||||
builder: Box::new(func),
|
||||
})
|
||||
}
|
||||
/// Creates a new local task.
|
||||
pub(crate) fn local(
|
||||
func: impl FnOnce(&mut Session, Notifier, &mut Requester, Responder) + 's,
|
||||
) -> Self {
|
||||
Self::Sync(SyncTask {
|
||||
func: Box::new(func),
|
||||
})
|
||||
}
|
||||
/// Creates a local task that immediately
|
||||
/// responds with the provided `request`.
|
||||
pub(crate) fn immediate<R>(id: RequestId, result: crate::server::Result<R>) -> Self
|
||||
where
|
||||
R: Serialize + Send + 'static,
|
||||
{
|
||||
Self::local(move |_, _, _, responder| {
|
||||
if let Err(err) = responder.respond(id, result) {
|
||||
tracing::error!("Unable to send immediate response: {err}");
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a local task that does nothing.
|
||||
pub(crate) fn nothing() -> Self {
|
||||
Self::local(move |_, _, _, _| {})
|
||||
}
|
||||
}
|
109
crates/ty_server/src/server/schedule/thread.rs
Normal file
109
crates/ty_server/src/server/schedule/thread.rs
Normal file
|
@ -0,0 +1,109 @@
|
|||
// +------------------------------------------------------------+
|
||||
// | Code adopted from: |
|
||||
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
|
||||
// | File: `crates/stdx/src/thread.rs` |
|
||||
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
|
||||
// +------------------------------------------------------------+
|
||||
//! A utility module for working with threads that automatically joins threads upon drop
|
||||
//! and abstracts over operating system quality of service (QoS) APIs
|
||||
//! through the concept of a “thread priority”.
|
||||
//!
|
||||
//! The priority of a thread is frozen at thread creation time,
|
||||
//! i.e. there is no API to change the priority of a thread once it has been spawned.
|
||||
//!
|
||||
//! As a system, rust-analyzer should have the property that
|
||||
//! old manual scheduling APIs are replaced entirely by QoS.
|
||||
//! To maintain this invariant, we panic when it is clear that
|
||||
//! old scheduling APIs have been used.
|
||||
//!
|
||||
//! Moreover, we also want to ensure that every thread has an priority set explicitly
|
||||
//! to force a decision about its importance to the system.
|
||||
//! Thus, [`ThreadPriority`] has no default value
|
||||
//! and every entry point to creating a thread requires a [`ThreadPriority`] upfront.
|
||||
|
||||
// Keeps us from getting warnings about the word `QoS`
|
||||
#![allow(clippy::doc_markdown)]
|
||||
|
||||
use std::fmt;
|
||||
|
||||
mod pool;
|
||||
mod priority;
|
||||
|
||||
pub(super) use pool::Pool;
|
||||
pub(super) use priority::ThreadPriority;
|
||||
|
||||
/// Thread builder pairing a [`ThreadPriority`] with the underlying
/// `jod_thread::Builder` configuration.
pub(super) struct Builder {
    priority: ThreadPriority,
    inner: jod_thread::Builder,
}

impl Builder {
    pub(super) fn new(priority: ThreadPriority) -> Builder {
        Builder {
            priority,
            inner: jod_thread::Builder::new(),
        }
    }

    /// Sets the name of the thread to be spawned.
    pub(super) fn name(self, name: String) -> Builder {
        Builder {
            inner: self.inner.name(name),
            ..self
        }
    }

    /// Sets the stack size (in bytes) of the thread to be spawned.
    pub(super) fn stack_size(self, size: usize) -> Builder {
        Builder {
            inner: self.inner.stack_size(size),
            ..self
        }
    }

    /// Spawns the thread. The configured priority is applied from *inside*
    /// the new thread, before `f` runs.
    pub(super) fn spawn<F, T>(self, f: F) -> std::io::Result<JoinHandle<T>>
    where
        F: FnOnce() -> T,
        F: Send + 'static,
        T: Send + 'static,
    {
        let inner_handle = self.inner.spawn(move || {
            self.priority.apply_to_current_thread();
            f()
        })?;

        // `allow_leak: false` means the handle joins rather than detaches on drop.
        Ok(JoinHandle {
            inner: Some(inner_handle),
            allow_leak: false,
        })
    }
}
|
||||
|
||||
pub(crate) struct JoinHandle<T = ()> {
|
||||
// `inner` is an `Option` so that we can
|
||||
// take ownership of the contained `JoinHandle`.
|
||||
inner: Option<jod_thread::JoinHandle<T>>,
|
||||
allow_leak: bool,
|
||||
}
|
||||
|
||||
impl<T> JoinHandle<T> {
|
||||
pub(crate) fn join(mut self) -> T {
|
||||
self.inner.take().unwrap().join()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for JoinHandle<T> {
|
||||
fn drop(&mut self) {
|
||||
if !self.allow_leak {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some(join_handle) = self.inner.take() {
|
||||
join_handle.detach();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for JoinHandle<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.pad("JoinHandle { .. }")
|
||||
}
|
||||
}
|
113
crates/ty_server/src/server/schedule/thread/pool.rs
Normal file
113
crates/ty_server/src/server/schedule/thread/pool.rs
Normal file
|
@ -0,0 +1,113 @@
|
|||
// +------------------------------------------------------------+
|
||||
// | Code adopted from: |
|
||||
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
|
||||
// | File: `crates/stdx/src/thread/pool.rs` |
|
||||
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
|
||||
// +------------------------------------------------------------+
|
||||
//! [`Pool`] implements a basic custom thread pool
|
||||
//! inspired by the [`threadpool` crate](http://docs.rs/threadpool).
|
||||
//! When you spawn a task you specify a thread priority
|
||||
//! so the pool can schedule it to run on a thread with that priority.
|
||||
//! rust-analyzer uses this to prioritize work based on latency requirements.
|
||||
//!
|
||||
//! The thread pool is implemented entirely using
|
||||
//! the threading utilities in [`crate::server::schedule::thread`].
|
||||
|
||||
use std::{
|
||||
num::NonZeroUsize,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use crossbeam::channel::{Receiver, Sender};
|
||||
|
||||
use super::{Builder, JoinHandle, ThreadPriority};
|
||||
|
||||
pub(crate) struct Pool {
    // `_handles` is never read: the field is present
    // only for its `Drop` impl.

    // The worker threads exit once the channel closes;
    // make sure to keep `job_sender` above `handles`
    // so that the channel is actually closed
    // before we join the worker threads!
    job_sender: Sender<Job>,
    _handles: Vec<JoinHandle>,
    /// Count of jobs currently executing (incremented/decremented around each job).
    extant_tasks: Arc<AtomicUsize>,
}

/// A unit of work queued onto the pool, tagged with the priority it should run at.
struct Job {
    requested_priority: ThreadPriority,
    f: Box<dyn FnOnce() + Send + 'static>,
}
|
||||
|
||||
impl Pool {
    /// Spawns `threads` worker threads that pull [`Job`]s off a shared bounded channel.
    pub(crate) fn new(threads: NonZeroUsize) -> Pool {
        // Override OS defaults to avoid stack overflows on platforms with low stack size defaults.
        const STACK_SIZE: usize = 2 * 1024 * 1024;
        const INITIAL_PRIORITY: ThreadPriority = ThreadPriority::Worker;

        let threads = usize::from(threads);

        // Channel buffer capacity is between 2 and 4, depending on the pool size.
        let (job_sender, job_receiver) = crossbeam::channel::bounded(std::cmp::min(threads * 2, 4));
        let extant_tasks = Arc::new(AtomicUsize::new(0));

        let mut handles = Vec::with_capacity(threads);
        for i in 0..threads {
            let handle = Builder::new(INITIAL_PRIORITY)
                .stack_size(STACK_SIZE)
                .name(format!("ruff:worker:{i}"))
                .spawn({
                    let extant_tasks = Arc::clone(&extant_tasks);
                    let job_receiver: Receiver<Job> = job_receiver.clone();
                    move || {
                        let mut current_priority = INITIAL_PRIORITY;
                        // This loop ends when the channel disconnects, i.e. when
                        // `job_sender` is dropped (see the field-ordering comment
                        // on `Pool`).
                        for job in job_receiver {
                            // Only pay for a QoS change when the requested
                            // priority differs from the thread's current one.
                            if job.requested_priority != current_priority {
                                job.requested_priority.apply_to_current_thread();
                                current_priority = job.requested_priority;
                            }
                            extant_tasks.fetch_add(1, Ordering::SeqCst);
                            (job.f)();
                            extant_tasks.fetch_sub(1, Ordering::SeqCst);
                        }
                    }
                })
                .expect("failed to spawn thread");

            handles.push(handle);
        }

        Pool {
            _handles: handles,
            extant_tasks,
            job_sender,
        }
    }

    /// Queues `f` to run on some worker thread at the given `priority`.
    /// Blocks if the (small, bounded) job channel is full.
    pub(crate) fn spawn<F>(&self, priority: ThreadPriority, f: F)
    where
        F: FnOnce() + Send + 'static,
    {
        let f = Box::new(move || {
            // Debug-only sanity check that the worker actually switched
            // to the requested priority before running the job.
            if cfg!(debug_assertions) {
                priority.assert_is_used_on_current_thread();
            }
            f();
        });

        let job = Job {
            requested_priority: priority,
            f,
        };
        self.job_sender.send(job).unwrap();
    }

    /// Number of jobs currently executing (not the number of worker threads).
    #[allow(dead_code)]
    pub(super) fn len(&self) -> usize {
        self.extant_tasks.load(Ordering::SeqCst)
    }
}
|
297
crates/ty_server/src/server/schedule/thread/priority.rs
Normal file
297
crates/ty_server/src/server/schedule/thread/priority.rs
Normal file
|
@ -0,0 +1,297 @@
|
|||
// +------------------------------------------------------------+
|
||||
// | Code adopted from: |
|
||||
// | Repository: https://github.com/rust-lang/rust-analyzer.git |
|
||||
// | File: `crates/stdx/src/thread/intent.rs` |
|
||||
// | Commit: 03b3cb6be9f21c082f4206b35c7fe7f291c94eaa |
|
||||
// +------------------------------------------------------------+
|
||||
//! An opaque façade around platform-specific QoS APIs.
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
// Please maintain order from least to most priority for the derived `Ord` impl.
pub(crate) enum ThreadPriority {
    /// Any thread which does work that isn't in a critical path.
    Worker,

    /// Any thread which does work caused by the user typing, or
    /// work that the editor may wait on.
    LatencySensitive,
}

impl ThreadPriority {
    // These APIs must remain private;
    // we only want consumers to set thread priority
    // during thread creation.

    /// Applies this priority to the calling thread via the platform's QoS facade below.
    pub(crate) fn apply_to_current_thread(self) {
        let class = thread_priority_to_qos_class(self);
        set_current_thread_qos_class(class);
    }

    /// Asserts that the calling thread runs at the QoS class corresponding to
    /// this priority. No-op on platforms where QoS is unavailable.
    pub(crate) fn assert_is_used_on_current_thread(self) {
        if IS_QOS_AVAILABLE {
            let class = thread_priority_to_qos_class(self);
            assert_eq!(get_current_thread_qos_class(), Some(class));
        }
    }
}

use imp::QoSClass;

/// Whether the current platform exposes QoS APIs (see the `imp` modules below).
const IS_QOS_AVAILABLE: bool = imp::IS_QOS_AVAILABLE;

fn set_current_thread_qos_class(class: QoSClass) {
    imp::set_current_thread_qos_class(class);
}

fn get_current_thread_qos_class() -> Option<QoSClass> {
    imp::get_current_thread_qos_class()
}

fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
    imp::thread_priority_to_qos_class(priority)
}
|
||||
|
||||
// All Apple platforms use XNU as their kernel
// and thus have the concept of QoS.
#[cfg(target_vendor = "apple")]
mod imp {
    use super::ThreadPriority;

    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    // Please maintain order from least to most priority for the derived `Ord` impl.
    pub(super) enum QoSClass {
        // Documentation adapted from https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/include/sys/qos.h#L55
        //
        /// TLDR: invisible maintenance tasks
        ///
        /// Contract:
        ///
        /// * **You do not care about how long it takes for work to finish.**
        /// * **You do not care about work being deferred temporarily.**
        /// (e.g. if the device's battery is in a critical state)
        ///
        /// Examples:
        ///
        /// * in a video editor:
        /// creating periodic backups of project files
        /// * in a browser:
        /// cleaning up cached sites which have not been accessed in a long time
        /// * in a collaborative word processor:
        /// creating a searchable index of all documents
        ///
        /// Use this QoS class for background tasks
        /// which the user did not initiate themselves
        /// and which are invisible to the user.
        /// It is expected that this work will take significant time to complete:
        /// minutes or even hours.
        ///
        /// This QoS class provides the most energy and thermally-efficient execution possible.
        /// All other work is prioritized over background tasks.
        Background,

        /// TLDR: tasks that don't block using your app
        ///
        /// Contract:
        ///
        /// * **Your app remains useful even as the task is executing.**
        ///
        /// Examples:
        ///
        /// * in a video editor:
        /// exporting a video to disk -
        /// the user can still work on the timeline
        /// * in a browser:
        /// automatically extracting a downloaded zip file -
        /// the user can still switch tabs
        /// * in a collaborative word processor:
        /// downloading images embedded in a document -
        /// the user can still make edits
        ///
        /// Use this QoS class for tasks which
        /// may or may not be initiated by the user,
        /// but whose result is visible.
        /// It is expected that this work will take a few seconds to a few minutes.
        /// Typically your app will include a progress bar
        /// for tasks using this class.
        ///
        /// This QoS class provides a balance between
        /// performance, responsiveness and efficiency.
        Utility,

        /// TLDR: tasks that block using your app
        ///
        /// Contract:
        ///
        /// * **You need this work to complete
        /// before the user can keep interacting with your app.**
        /// * **Your work will not take more than a few seconds to complete.**
        ///
        /// Examples:
        ///
        /// * in a video editor:
        /// opening a saved project
        /// * in a browser:
        /// loading a list of the user's bookmarks and top sites
        /// when a new tab is created
        /// * in a collaborative word processor:
        /// running a search on the document's content
        ///
        /// Use this QoS class for tasks which were initiated by the user
        /// and block the usage of your app while they are in progress.
        /// It is expected that this work will take a few seconds or less to complete;
        /// not long enough to cause the user to switch to something else.
        /// Your app will likely indicate progress on these tasks
        /// through the display of placeholder content or modals.
        ///
        /// This QoS class is not energy-efficient.
        /// Rather, it provides responsiveness
        /// by prioritizing work above other tasks on the system
        /// except for critical user-interactive work.
        UserInitiated,

        /// TLDR: render loops and nothing else
        ///
        /// Contract:
        ///
        /// * **You absolutely need this work to complete immediately
        /// or your app will appear to freeze.**
        /// * **Your work will always complete virtually instantaneously.**
        ///
        /// Examples:
        ///
        /// * the main thread in a GUI application
        /// * the update & render loop in a game
        /// * a secondary thread which progresses an animation
        ///
        /// Use this QoS class for any work which, if delayed,
        /// will make your user interface unresponsive.
        /// It is expected that this work will be virtually instantaneous.
        ///
        /// This QoS class is not energy-efficient.
        /// Specifying this class is a request to run with
        /// nearly all available system CPU and I/O bandwidth even under contention.
        UserInteractive,
    }

    pub(super) const IS_QOS_AVAILABLE: bool = true;

    pub(super) fn set_current_thread_qos_class(class: QoSClass) {
        let c = match class {
            QoSClass::UserInteractive => libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE,
            QoSClass::UserInitiated => libc::qos_class_t::QOS_CLASS_USER_INITIATED,
            QoSClass::Utility => libc::qos_class_t::QOS_CLASS_UTILITY,
            QoSClass::Background => libc::qos_class_t::QOS_CLASS_BACKGROUND,
        };

        // SAFETY: `c` is a valid `qos_class_t` value and `0` is a valid relative
        // priority; the call only affects the calling thread.
        #[allow(unsafe_code)]
        let code = unsafe { libc::pthread_set_qos_class_self_np(c, 0) };

        if code == 0 {
            return;
        }

        // SAFETY: `__error()` returns a valid pointer to the calling thread's errno.
        #[allow(unsafe_code)]
        let errno = unsafe { *libc::__error() };

        match errno {
            libc::EPERM => {
                // This thread has been excluded from the QoS system
                // due to a previous call to a function such as `pthread_setschedparam`
                // which is incompatible with QoS.
                //
                // Panic instead of returning an error
                // to maintain the invariant that we only use QoS APIs.
                panic!("tried to set QoS of thread which has opted out of QoS (os error {errno})")
            }

            libc::EINVAL => {
                // This is returned if we pass something other than a qos_class_t
                // to `pthread_set_qos_class_self_np`.
                //
                // This is impossible, so again panic.
                unreachable!(
                    "invalid qos_class_t value was passed to pthread_set_qos_class_self_np"
                )
            }

            _ => {
                // `pthread_set_qos_class_self_np`’s documentation
                // does not mention any other errors.
                unreachable!("`pthread_set_qos_class_self_np` returned unexpected error {errno}")
            }
        }
    }

    pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
        // SAFETY: `pthread_self()` has no preconditions.
        #[allow(unsafe_code)]
        let current_thread = unsafe { libc::pthread_self() };
        let mut qos_class_raw = libc::qos_class_t::QOS_CLASS_UNSPECIFIED;
        // SAFETY: `current_thread` is a valid thread handle, `qos_class_raw` is a
        // valid out-pointer, and passing null for the relative-priority out-pointer
        // is permitted by the API.
        #[allow(unsafe_code)]
        let code = unsafe {
            libc::pthread_get_qos_class_np(current_thread, &mut qos_class_raw, std::ptr::null_mut())
        };

        if code != 0 {
            // `pthread_get_qos_class_np`’s documentation states that
            // an error value is placed into errno if the return code is not zero.
            // However, it never states what errors are possible.
            // Inspecting the source[0] shows that, as of this writing, it always returns zero.
            //
            // Whatever errors the function could report in future are likely to be
            // ones which we cannot handle anyway
            //
            // 0: https://github.com/apple-oss-distributions/libpthread/blob/67e155c94093be9a204b69637d198eceff2c7c46/src/qos.c#L171-L177
            // SAFETY: `__error()` returns a valid pointer to the calling thread's errno.
            #[allow(unsafe_code)]
            let errno = unsafe { *libc::__error() };
            unreachable!("`pthread_get_qos_class_np` failed unexpectedly (os error {errno})");
        }

        match qos_class_raw {
            libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE => Some(QoSClass::UserInteractive),
            libc::qos_class_t::QOS_CLASS_USER_INITIATED => Some(QoSClass::UserInitiated),
            libc::qos_class_t::QOS_CLASS_DEFAULT => None, // QoS has never been set
            libc::qos_class_t::QOS_CLASS_UTILITY => Some(QoSClass::Utility),
            libc::qos_class_t::QOS_CLASS_BACKGROUND => Some(QoSClass::Background),

            libc::qos_class_t::QOS_CLASS_UNSPECIFIED => {
                // Using manual scheduling APIs causes threads to “opt out” of QoS.
                // At this point they become incompatible with QoS,
                // and as such have the “unspecified” QoS class.
                //
                // Panic instead of returning an error
                // to maintain the invariant that we only use QoS APIs.
                panic!("tried to get QoS of thread which has opted out of QoS")
            }
        }
    }

    pub(super) fn thread_priority_to_qos_class(priority: ThreadPriority) -> QoSClass {
        match priority {
            ThreadPriority::Worker => QoSClass::Utility,
            ThreadPriority::LatencySensitive => QoSClass::UserInitiated,
        }
    }
}
|
||||
|
||||
// FIXME: Windows has QoS APIs, we should use them!
#[cfg(not(target_vendor = "apple"))]
mod imp {
    use super::ThreadPriority;

    /// Stub QoS class for platforms where no QoS API is wired up.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    pub(super) enum QoSClass {
        Default,
    }

    pub(super) const IS_QOS_AVAILABLE: bool = false;

    /// No-op: there is no QoS to apply on this platform.
    pub(super) fn set_current_thread_qos_class(_: QoSClass) {}

    /// Always `None`: no QoS class is ever recorded on this platform.
    pub(super) fn get_current_thread_qos_class() -> Option<QoSClass> {
        None
    }

    /// Every priority collapses to the single stub class.
    pub(super) fn thread_priority_to_qos_class(_: ThreadPriority) -> QoSClass {
        QoSClass::Default
    }
}
|
284
crates/ty_server/src/session.rs
Normal file
284
crates/ty_server/src/session.rs
Normal file
|
@ -0,0 +1,284 @@
|
|||
//! Data model, state management, and configuration resolution.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::anyhow;
|
||||
use lsp_types::{ClientCapabilities, TextDocumentContentChangeEvent, Url};
|
||||
|
||||
use ruff_db::files::{system_path_to_file, File};
|
||||
use ruff_db::system::SystemPath;
|
||||
use ruff_db::Db;
|
||||
use ty_project::{ProjectDatabase, ProjectMetadata};
|
||||
|
||||
use crate::document::{DocumentKey, DocumentVersion, NotebookDocument};
|
||||
use crate::system::{url_to_any_system_path, AnySystemPath, LSPSystem};
|
||||
use crate::{PositionEncoding, TextDocument};
|
||||
|
||||
pub(crate) use self::capabilities::ResolvedClientCapabilities;
|
||||
pub use self::index::DocumentQuery;
|
||||
pub(crate) use self::settings::AllSettings;
|
||||
pub use self::settings::ClientSettings;
|
||||
|
||||
mod capabilities;
|
||||
pub(crate) mod index;
|
||||
mod settings;
|
||||
|
||||
// TODO(dhruvmanila): In general, the server shouldn't use any salsa queries directly and instead
|
||||
// should use methods on `ProjectDatabase`.
|
||||
|
||||
/// The global state for the LSP
pub struct Session {
    /// Used to retrieve information about open documents and settings.
    ///
    /// This will be [`None`] when a mutable reference is held to the index via [`index_mut`]
    /// to prevent the index from being accessed while it is being modified. It will be restored
    /// when the mutable reference ([`MutIndexGuard`]) is dropped.
    ///
    /// [`index_mut`]: Session::index_mut
    index: Option<Arc<index::Index>>,

    /// Maps workspace folders to their respective project databases.
    /// Kept ordered (`BTreeMap`) so `project_db_for_path` can locate the
    /// closest enclosing workspace via a range query.
    projects_by_workspace_folder: BTreeMap<PathBuf, ProjectDatabase>,

    /// The global position encoding, negotiated during LSP initialization.
    position_encoding: PositionEncoding,
    /// Tracks what LSP features the client supports and doesn't support.
    resolved_client_capabilities: Arc<ResolvedClientCapabilities>,
}
|
||||
|
||||
impl Session {
|
||||
    /// Creates a new session from the negotiated client capabilities, position
    /// encoding, global settings, and workspace folders.
    ///
    /// Every workspace folder URL must resolve to a UTF-8 file path; a project
    /// database is discovered and constructed for each folder.
    pub fn new(
        client_capabilities: &ClientCapabilities,
        position_encoding: PositionEncoding,
        global_settings: ClientSettings,
        workspace_folders: &[(Url, ClientSettings)],
    ) -> crate::Result<Self> {
        let mut workspaces = BTreeMap::new();
        let index = Arc::new(index::Index::new(global_settings));

        for (url, _) in workspace_folders {
            let path = url
                .to_file_path()
                .map_err(|()| anyhow!("Workspace URL is not a file or directory: {:?}", url))?;
            let system_path = SystemPath::from_std_path(&path)
                .ok_or_else(|| anyhow!("Workspace path is not a valid UTF-8 path: {:?}", path))?;
            // Each project database gets its own `LSPSystem` sharing the same index.
            let system = LSPSystem::new(index.clone());

            // TODO(dhruvmanila): Get the values from the client settings
            let mut metadata = ProjectMetadata::discover(system_path, &system)?;
            metadata.apply_configuration_files(&system)?;

            // TODO(micha): Handle the case where the program settings are incorrect more gracefully.
            workspaces.insert(path, ProjectDatabase::new(metadata, system)?);
        }

        Ok(Self {
            position_encoding,
            projects_by_workspace_folder: workspaces,
            index: Some(index),
            resolved_client_capabilities: Arc::new(ResolvedClientCapabilities::new(
                client_capabilities,
            )),
        })
    }
|
||||
|
||||
// TODO(dhruvmanila): Ideally, we should have a single method for `workspace_db_for_path_mut`
|
||||
// and `default_workspace_db_mut` but the borrow checker doesn't allow that.
|
||||
// https://github.com/astral-sh/ruff/pull/13041#discussion_r1726725437
|
||||
|
||||
/// Returns a reference to the project's [`ProjectDatabase`] corresponding to the given path, if
|
||||
/// any.
|
||||
pub(crate) fn project_db_for_path(&self, path: impl AsRef<Path>) -> Option<&ProjectDatabase> {
|
||||
self.projects_by_workspace_folder
|
||||
.range(..=path.as_ref().to_path_buf())
|
||||
.next_back()
|
||||
.map(|(_, db)| db)
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the project [`ProjectDatabase`] corresponding to the given
|
||||
/// path, if any.
|
||||
pub(crate) fn project_db_for_path_mut(
|
||||
&mut self,
|
||||
path: impl AsRef<Path>,
|
||||
) -> Option<&mut ProjectDatabase> {
|
||||
self.projects_by_workspace_folder
|
||||
.range_mut(..=path.as_ref().to_path_buf())
|
||||
.next_back()
|
||||
.map(|(_, db)| db)
|
||||
}
|
||||
|
||||
/// Returns a reference to the default project [`ProjectDatabase`]. The default project is the
|
||||
/// minimum root path in the project map.
|
||||
pub(crate) fn default_project_db(&self) -> &ProjectDatabase {
|
||||
// SAFETY: Currently, ty only support a single project.
|
||||
self.projects_by_workspace_folder.values().next().unwrap()
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the default project [`ProjectDatabase`].
|
||||
pub(crate) fn default_project_db_mut(&mut self) -> &mut ProjectDatabase {
|
||||
// SAFETY: Currently, ty only support a single project.
|
||||
self.projects_by_workspace_folder
|
||||
.values_mut()
|
||||
.next()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Converts an LSP URL into the [`DocumentKey`] the index uses to identify the document.
pub fn key_from_url(&self, url: Url) -> DocumentKey {
    let index = self.index();
    index.key_from_url(url)
}
|
||||
|
||||
/// Creates a document snapshot with the URL referencing the document to snapshot.
|
||||
pub fn take_snapshot(&self, url: Url) -> Option<DocumentSnapshot> {
|
||||
let key = self.key_from_url(url);
|
||||
Some(DocumentSnapshot {
|
||||
resolved_client_capabilities: self.resolved_client_capabilities.clone(),
|
||||
document_ref: self.index().make_document_ref(key)?,
|
||||
position_encoding: self.position_encoding,
|
||||
})
|
||||
}
|
||||
|
||||
/// Registers a notebook document at the provided `url`.
|
||||
/// If a document is already open here, it will be overwritten.
|
||||
pub fn open_notebook_document(&mut self, url: Url, document: NotebookDocument) {
|
||||
self.index_mut().open_notebook_document(url, document);
|
||||
}
|
||||
|
||||
/// Registers a text document at the provided `url`.
|
||||
/// If a document is already open here, it will be overwritten.
|
||||
pub(crate) fn open_text_document(&mut self, url: Url, document: TextDocument) {
|
||||
self.index_mut().open_text_document(url, document);
|
||||
}
|
||||
|
||||
/// Updates a text document at the associated `key`.
|
||||
///
|
||||
/// The document key must point to a text document, or this will throw an error.
|
||||
pub(crate) fn update_text_document(
|
||||
&mut self,
|
||||
key: &DocumentKey,
|
||||
content_changes: Vec<TextDocumentContentChangeEvent>,
|
||||
new_version: DocumentVersion,
|
||||
) -> crate::Result<()> {
|
||||
let position_encoding = self.position_encoding;
|
||||
self.index_mut()
|
||||
.update_text_document(key, content_changes, new_version, position_encoding)
|
||||
}
|
||||
|
||||
/// De-registers a document, specified by its key.
|
||||
/// Calling this multiple times for the same document is a logic error.
|
||||
pub(crate) fn close_document(&mut self, key: &DocumentKey) -> crate::Result<()> {
|
||||
self.index_mut().close_document(key)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns a reference to the index.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if there's a mutable reference to the index via [`index_mut`].
|
||||
///
|
||||
/// [`index_mut`]: Session::index_mut
|
||||
fn index(&self) -> &index::Index {
|
||||
self.index.as_ref().unwrap()
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the index.
///
/// This method drops all references to the index and returns a guard that will restore the
/// references when dropped. This guard holds the only reference to the index and allows
/// modifying it.
fn index_mut(&mut self) -> MutIndexGuard {
    // Move the `Arc<Index>` out of the session; `index()` would panic until the guard
    // restores it on drop.
    let index = self.index.take().unwrap();

    for db in self.projects_by_workspace_folder.values_mut() {
        // Remove the `index` from each database. This drops the count of `Arc<Index>` down to 1
        db.system_mut()
            .as_any_mut()
            .downcast_mut::<LSPSystem>()
            .unwrap()
            .take_index();
    }

    // There should now be exactly one reference to index which is self.index.
    // NOTE(review): `Arc::into_inner` returns `None` if another reference still exists;
    // the guard's `Deref` would then panic — confirm no other long-lived clones are possible.
    let index = Arc::into_inner(index);

    MutIndexGuard {
        session: self,
        index,
    }
}
|
||||
}
|
||||
|
||||
/// A guard that holds the only reference to the index and allows modifying it.
///
/// When dropped, this guard restores all references to the index.
struct MutIndexGuard<'a> {
    // The session whose `index` field (and per-project `LSPSystem` copies) get restored on drop.
    session: &'a mut Session,
    // The sole, un-shared copy of the index; `None` only after `drop` has run (or if
    // `Arc::into_inner` failed in `index_mut`).
    index: Option<index::Index>,
}
|
||||
|
||||
impl Deref for MutIndexGuard<'_> {
    type Target = index::Index;

    // Panics if the index was not reclaimed (see `Session::index_mut`); the guard is the
    // exclusive owner for its whole lifetime otherwise.
    fn deref(&self) -> &Self::Target {
        self.index.as_ref().unwrap()
    }
}
|
||||
|
||||
impl DerefMut for MutIndexGuard<'_> {
    // Mutable access to the exclusively-owned index; same panic condition as `deref`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.index.as_mut().unwrap()
    }
}
|
||||
|
||||
impl Drop for MutIndexGuard<'_> {
|
||||
fn drop(&mut self) {
|
||||
if let Some(index) = self.index.take() {
|
||||
let index = Arc::new(index);
|
||||
for db in self.session.projects_by_workspace_folder.values_mut() {
|
||||
db.system_mut()
|
||||
.as_any_mut()
|
||||
.downcast_mut::<LSPSystem>()
|
||||
.unwrap()
|
||||
.set_index(index.clone());
|
||||
}
|
||||
|
||||
self.session.index = Some(index);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An immutable snapshot of `Session` that references
/// a specific document.
#[derive(Debug)]
pub struct DocumentSnapshot {
    // Shared (cheaply clonable) view of the capabilities negotiated at initialization.
    resolved_client_capabilities: Arc<ResolvedClientCapabilities>,
    // Query resolving the snapshotted document (text file, notebook, or notebook cell).
    document_ref: index::DocumentQuery,
    // Position encoding negotiated with the client, captured at snapshot time.
    position_encoding: PositionEncoding,
}
|
||||
|
||||
impl DocumentSnapshot {
|
||||
pub(crate) fn resolved_client_capabilities(&self) -> &ResolvedClientCapabilities {
|
||||
&self.resolved_client_capabilities
|
||||
}
|
||||
|
||||
pub fn query(&self) -> &index::DocumentQuery {
|
||||
&self.document_ref
|
||||
}
|
||||
|
||||
pub(crate) fn encoding(&self) -> PositionEncoding {
|
||||
self.position_encoding
|
||||
}
|
||||
|
||||
pub(crate) fn file(&self, db: &dyn Db) -> Option<File> {
|
||||
match url_to_any_system_path(self.document_ref.file_url()).ok()? {
|
||||
AnySystemPath::System(path) => system_path_to_file(db, path).ok(),
|
||||
AnySystemPath::SystemVirtual(virtual_path) => db
|
||||
.files()
|
||||
.try_virtual_file(&virtual_path)
|
||||
.map(|virtual_file| virtual_file.file()),
|
||||
}
|
||||
}
|
||||
}
|
95
crates/ty_server/src/session/capabilities.rs
Normal file
95
crates/ty_server/src/session/capabilities.rs
Normal file
|
@ -0,0 +1,95 @@
|
|||
use lsp_types::{ClientCapabilities, MarkupKind};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[allow(clippy::struct_excessive_bools)]
pub(crate) struct ResolvedClientCapabilities {
    // `true` when the client supports both code-action `dataSupport` and resolving the
    // `edit` property lazily (see `new`).
    pub(crate) code_action_deferred_edit_resolution: bool,
    // Mirrors `workspace.applyEdit`.
    pub(crate) apply_edit: bool,
    // Mirrors `workspace.workspaceEdit.documentChanges`.
    pub(crate) document_changes: bool,
    // Currently hard-coded to `true` in `new` (see the TODO there about
    // `workspace.diagnostics.refreshSupport`).
    pub(crate) workspace_refresh: bool,
    // `true` when the client advertises `textDocument.diagnostic` (pull diagnostics).
    pub(crate) pull_diagnostics: bool,
    /// Whether `textDocument.typeDefinition.linkSupport` is `true`
    pub(crate) type_definition_link_support: bool,

    /// `true`, if the first markup kind in `textDocument.hover.contentFormat` is `Markdown`
    pub(crate) hover_prefer_markdown: bool,
}
|
||||
|
||||
impl ResolvedClientCapabilities {
|
||||
pub(super) fn new(client_capabilities: &ClientCapabilities) -> Self {
|
||||
let code_action_settings = client_capabilities
|
||||
.text_document
|
||||
.as_ref()
|
||||
.and_then(|doc_settings| doc_settings.code_action.as_ref());
|
||||
let code_action_data_support = code_action_settings
|
||||
.and_then(|code_action_settings| code_action_settings.data_support)
|
||||
.unwrap_or_default();
|
||||
let code_action_edit_resolution = code_action_settings
|
||||
.and_then(|code_action_settings| code_action_settings.resolve_support.as_ref())
|
||||
.is_some_and(|resolve_support| resolve_support.properties.contains(&"edit".into()));
|
||||
|
||||
let apply_edit = client_capabilities
|
||||
.workspace
|
||||
.as_ref()
|
||||
.and_then(|workspace| workspace.apply_edit)
|
||||
.unwrap_or_default();
|
||||
|
||||
let document_changes = client_capabilities
|
||||
.workspace
|
||||
.as_ref()
|
||||
.and_then(|workspace| workspace.workspace_edit.as_ref())
|
||||
.and_then(|workspace_edit| workspace_edit.document_changes)
|
||||
.unwrap_or_default();
|
||||
|
||||
let declaration_link_support = client_capabilities
|
||||
.text_document
|
||||
.as_ref()
|
||||
.and_then(|document| document.type_definition?.link_support)
|
||||
.unwrap_or_default();
|
||||
|
||||
let workspace_refresh = true;
|
||||
|
||||
// TODO(jane): Once the bug involving workspace.diagnostic(s) deserialization has been fixed,
|
||||
// uncomment this.
|
||||
/*
|
||||
let workspace_refresh = client_capabilities
|
||||
.workspace
|
||||
.as_ref()
|
||||
.and_then(|workspace| workspace.diagnostic.as_ref())
|
||||
.and_then(|diagnostic| diagnostic.refresh_support)
|
||||
.unwrap_or_default();
|
||||
*/
|
||||
|
||||
let pull_diagnostics = client_capabilities
|
||||
.text_document
|
||||
.as_ref()
|
||||
.and_then(|text_document| text_document.diagnostic.as_ref())
|
||||
.is_some();
|
||||
|
||||
let hover_prefer_markdown = client_capabilities
|
||||
.text_document
|
||||
.as_ref()
|
||||
.and_then(|text_document| {
|
||||
Some(
|
||||
text_document
|
||||
.hover
|
||||
.as_ref()?
|
||||
.content_format
|
||||
.as_ref()?
|
||||
.contains(&MarkupKind::Markdown),
|
||||
)
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
Self {
|
||||
code_action_deferred_edit_resolution: code_action_data_support
|
||||
&& code_action_edit_resolution,
|
||||
apply_edit,
|
||||
document_changes,
|
||||
workspace_refresh,
|
||||
pull_diagnostics,
|
||||
type_definition_link_support: declaration_link_support,
|
||||
hover_prefer_markdown,
|
||||
}
|
||||
}
|
||||
}
|
319
crates/ty_server/src/session/index.rs
Normal file
319
crates/ty_server/src/session/index.rs
Normal file
|
@ -0,0 +1,319 @@
|
|||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use lsp_types::Url;
|
||||
use rustc_hash::FxHashMap;
|
||||
|
||||
use crate::{
|
||||
document::{DocumentKey, DocumentVersion, NotebookDocument},
|
||||
PositionEncoding, TextDocument,
|
||||
};
|
||||
|
||||
use super::ClientSettings;
|
||||
|
||||
/// Stores and tracks all open documents in a session, along with their associated settings.
#[derive(Default, Debug)]
pub(crate) struct Index {
    /// Maps all document file URLs to the associated document controller
    documents: FxHashMap<Url, DocumentController>,

    /// Maps opaque cell URLs to a notebook URL (document)
    notebook_cells: FxHashMap<Url, Url>,

    /// Global settings provided by the client.
    // Retained for future use; nothing reads it yet, hence the lint expectation.
    #[expect(dead_code)]
    global_settings: ClientSettings,
}
|
||||
|
||||
impl Index {
    /// Creates an empty index holding the client's global settings.
    pub(super) fn new(global_settings: ClientSettings) -> Self {
        Self {
            documents: FxHashMap::default(),
            notebook_cells: FxHashMap::default(),
            global_settings,
        }
    }

    /// Iterates over the URLs of all open text documents (notebooks excluded).
    #[expect(dead_code)]
    pub(super) fn text_document_urls(&self) -> impl Iterator<Item = &Url> + '_ {
        self.documents
            .iter()
            .filter_map(|(url, doc)| doc.as_text().and(Some(url)))
    }

    /// Iterates over the URLs of all open notebook documents.
    #[expect(dead_code)]
    pub(super) fn notebook_document_urls(&self) -> impl Iterator<Item = &Url> + '_ {
        self.documents
            .iter()
            .filter(|(_, doc)| doc.as_notebook().is_some())
            .map(|(url, _)| url)
    }

    /// Applies `content_changes` to the text document at `key`, bumping it to `new_version`.
    ///
    /// An empty change list only updates the version. Errors if `key` does not resolve to
    /// a text document.
    pub(super) fn update_text_document(
        &mut self,
        key: &DocumentKey,
        content_changes: Vec<lsp_types::TextDocumentContentChangeEvent>,
        new_version: DocumentVersion,
        encoding: PositionEncoding,
    ) -> crate::Result<()> {
        let controller = self.document_controller_for_key(key)?;
        let Some(document) = controller.as_text_mut() else {
            anyhow::bail!("Text document URI does not point to a text document");
        };

        if content_changes.is_empty() {
            // Version-only update: the client sent no edits.
            document.update_version(new_version);
            return Ok(());
        }

        document.apply_changes(content_changes, new_version, encoding);

        Ok(())
    }

    /// Classifies `url` as a notebook cell (known in `notebook_cells`), a notebook
    /// (`.ipynb` extension, case-insensitive), or a plain text document.
    pub(crate) fn key_from_url(&self, url: Url) -> DocumentKey {
        if self.notebook_cells.contains_key(&url) {
            DocumentKey::NotebookCell(url)
        } else if Path::new(url.path())
            .extension()
            .is_some_and(|ext| ext.eq_ignore_ascii_case("ipynb"))
        {
            DocumentKey::Notebook(url)
        } else {
            DocumentKey::Text(url)
        }
    }

    /// Applies a notebook change: registers newly opened cells in the cell index, then
    /// forwards cells/metadata/version to the notebook document itself.
    #[expect(dead_code)]
    pub(super) fn update_notebook_document(
        &mut self,
        key: &DocumentKey,
        cells: Option<lsp_types::NotebookDocumentCellChange>,
        metadata: Option<serde_json::Map<String, serde_json::Value>>,
        new_version: DocumentVersion,
        encoding: PositionEncoding,
    ) -> crate::Result<()> {
        // update notebook cell index
        if let Some(lsp_types::NotebookDocumentCellChangeStructure {
            did_open: Some(did_open),
            ..
        }) = cells.as_ref().and_then(|cells| cells.structure.as_ref())
        {
            let Some(path) = self.url_for_key(key).cloned() else {
                anyhow::bail!("Tried to open unavailable document `{key}`");
            };

            for opened_cell in did_open {
                self.notebook_cells
                    .insert(opened_cell.uri.clone(), path.clone());
            }
            // deleted notebook cells are closed via textDocument/didClose - we don't close them here.
        }

        let controller = self.document_controller_for_key(key)?;
        let Some(notebook) = controller.as_notebook_mut() else {
            anyhow::bail!("Notebook document URI does not point to a notebook document");
        };

        notebook.update(cells, metadata, new_version, encoding)?;
        Ok(())
    }

    /// Builds a read-only [`DocumentQuery`] for `key`, or `None` if the document isn't open.
    pub(crate) fn make_document_ref(&self, key: DocumentKey) -> Option<DocumentQuery> {
        let url = self.url_for_key(&key)?.clone();
        let controller = self.documents.get(&url)?;
        // Preserve the cell selection when the key targets a specific notebook cell.
        let cell_url = match key {
            DocumentKey::NotebookCell(cell_url) => Some(cell_url),
            _ => None,
        };
        Some(controller.make_ref(cell_url, url))
    }

    /// Opens (or replaces) a text document at `url`.
    pub(super) fn open_text_document(&mut self, url: Url, document: TextDocument) {
        self.documents
            .insert(url, DocumentController::new_text(document));
    }

    /// Opens (or replaces) a notebook document, registering each of its cell URLs.
    pub(super) fn open_notebook_document(&mut self, notebook_url: Url, document: NotebookDocument) {
        for cell_url in document.urls() {
            self.notebook_cells
                .insert(cell_url.clone(), notebook_url.clone());
        }
        self.documents
            .insert(notebook_url, DocumentController::new_notebook(document));
    }

    /// Closes the document at `key`; closing a notebook cell only unregisters the cell URL.
    pub(super) fn close_document(&mut self, key: &DocumentKey) -> crate::Result<()> {
        // Notebook cells URIs are removed from the index here, instead of during
        // `update_notebook_document`. This is because a notebook cell, as a text document,
        // is requested to be `closed` by VS Code after the notebook gets updated.
        // This is not documented in the LSP specification explicitly, and this assumption
        // may need revisiting in the future as we support more editors with notebook support.
        if let DocumentKey::NotebookCell(uri) = key {
            if self.notebook_cells.remove(uri).is_none() {
                tracing::warn!("Tried to remove a notebook cell that does not exist: {uri}",);
            }
            return Ok(());
        }
        let Some(url) = self.url_for_key(key).cloned() else {
            anyhow::bail!("Tried to close unavailable document `{key}`");
        };

        let Some(_) = self.documents.remove(&url) else {
            anyhow::bail!("tried to close document that didn't exist at {}", url)
        };
        Ok(())
    }

    /// Resolves `key` to its owning [`DocumentController`], erroring if unknown.
    fn document_controller_for_key(
        &mut self,
        key: &DocumentKey,
    ) -> crate::Result<&mut DocumentController> {
        let Some(url) = self.url_for_key(key).cloned() else {
            anyhow::bail!("Tried to open unavailable document `{key}`");
        };
        let Some(controller) = self.documents.get_mut(&url) else {
            anyhow::bail!("Document controller not available at `{}`", url);
        };
        Ok(controller)
    }

    /// Maps a key to the URL under which its document is stored: cell keys go through the
    /// cell-to-notebook map; text/notebook keys carry their URL directly.
    fn url_for_key<'a>(&'a self, key: &'a DocumentKey) -> Option<&'a Url> {
        match key {
            DocumentKey::Notebook(path) | DocumentKey::Text(path) => Some(path),
            DocumentKey::NotebookCell(uri) => self.notebook_cells.get(uri),
        }
    }
}
|
||||
|
||||
/// A mutable handler to an underlying document.
// Documents are stored behind `Arc` so read-only queries (`DocumentQuery`) can share them
// cheaply; mutation goes through `Arc::make_mut` (copy-on-write).
#[derive(Debug)]
enum DocumentController {
    Text(Arc<TextDocument>),
    Notebook(Arc<NotebookDocument>),
}
|
||||
|
||||
impl DocumentController {
|
||||
fn new_text(document: TextDocument) -> Self {
|
||||
Self::Text(Arc::new(document))
|
||||
}
|
||||
|
||||
fn new_notebook(document: NotebookDocument) -> Self {
|
||||
Self::Notebook(Arc::new(document))
|
||||
}
|
||||
|
||||
fn make_ref(&self, cell_url: Option<Url>, file_url: Url) -> DocumentQuery {
|
||||
match &self {
|
||||
Self::Notebook(notebook) => DocumentQuery::Notebook {
|
||||
cell_url,
|
||||
file_url,
|
||||
notebook: notebook.clone(),
|
||||
},
|
||||
Self::Text(document) => DocumentQuery::Text {
|
||||
file_url,
|
||||
document: document.clone(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn as_notebook_mut(&mut self) -> Option<&mut NotebookDocument> {
|
||||
Some(match self {
|
||||
Self::Notebook(notebook) => Arc::make_mut(notebook),
|
||||
Self::Text(_) => return None,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn as_notebook(&self) -> Option<&NotebookDocument> {
|
||||
match self {
|
||||
Self::Notebook(notebook) => Some(notebook),
|
||||
Self::Text(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn as_text(&self) -> Option<&TextDocument> {
|
||||
match self {
|
||||
Self::Text(document) => Some(document),
|
||||
Self::Notebook(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn as_text_mut(&mut self) -> Option<&mut TextDocument> {
|
||||
Some(match self {
|
||||
Self::Text(document) => Arc::make_mut(document),
|
||||
Self::Notebook(_) => return None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A read-only query to an open document.
/// This query can 'select' a text document, full notebook, or a specific notebook cell.
/// It also includes document settings.
#[derive(Debug, Clone)]
pub enum DocumentQuery {
    Text {
        // URL of the text document on disk / in the client.
        file_url: Url,
        // Shared handle to the document contents (cheap to clone).
        document: Arc<TextDocument>,
    },
    Notebook {
        /// The selected notebook cell, if it exists.
        cell_url: Option<Url>,
        /// The URL of the notebook.
        file_url: Url,
        // Shared handle to the notebook contents (cheap to clone).
        notebook: Arc<NotebookDocument>,
    },
}
|
||||
|
||||
impl DocumentQuery {
|
||||
/// Retrieve the original key that describes this document query.
|
||||
#[expect(dead_code)]
|
||||
pub(crate) fn make_key(&self) -> DocumentKey {
|
||||
match self {
|
||||
Self::Text { file_url, .. } => DocumentKey::Text(file_url.clone()),
|
||||
Self::Notebook {
|
||||
cell_url: Some(cell_uri),
|
||||
..
|
||||
} => DocumentKey::NotebookCell(cell_uri.clone()),
|
||||
Self::Notebook { file_url, .. } => DocumentKey::Notebook(file_url.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to access the underlying notebook document that this query is selecting.
|
||||
pub fn as_notebook(&self) -> Option<&NotebookDocument> {
|
||||
match self {
|
||||
Self::Notebook { notebook, .. } => Some(notebook),
|
||||
Self::Text { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the version of document selected by this query.
|
||||
pub(crate) fn version(&self) -> DocumentVersion {
|
||||
match self {
|
||||
Self::Text { document, .. } => document.version(),
|
||||
Self::Notebook { notebook, .. } => notebook.version(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the URL for the document selected by this query.
|
||||
pub(crate) fn file_url(&self) -> &Url {
|
||||
match self {
|
||||
Self::Text { file_url, .. } | Self::Notebook { file_url, .. } => file_url,
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to access the single inner text document selected by the query.
|
||||
/// If this query is selecting an entire notebook document, this will return `None`.
|
||||
#[expect(dead_code)]
|
||||
pub(crate) fn as_single_document(&self) -> Option<&TextDocument> {
|
||||
match self {
|
||||
Self::Text { document, .. } => Some(document),
|
||||
Self::Notebook {
|
||||
notebook,
|
||||
cell_url: cell_uri,
|
||||
..
|
||||
} => cell_uri
|
||||
.as_ref()
|
||||
.and_then(|cell_uri| notebook.cell_document_by_uri(cell_uri)),
|
||||
}
|
||||
}
|
||||
}
|
111
crates/ty_server/src/session/settings.rs
Normal file
111
crates/ty_server/src/session/settings.rs
Normal file
|
@ -0,0 +1,111 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
use lsp_types::Url;
|
||||
use rustc_hash::FxHashMap;
|
||||
use serde::Deserialize;
|
||||
|
||||
/// Maps a workspace URI to its associated client settings. Used during server initialization.
// Built in `AllSettings::from_init_options` from the per-workspace settings list.
pub(crate) type WorkspaceSettingsMap = FxHashMap<Url, ClientSettings>;
|
||||
|
||||
/// This is a direct representation of the settings schema sent by the client.
#[derive(Debug, Deserialize, Default)]
#[cfg_attr(test, derive(PartialEq, Eq))]
#[serde(rename_all = "camelCase")]
pub struct ClientSettings {
    // These settings are only needed for tracing, and are only read from the global configuration.
    // These will not be in the resolved settings.
    // `flatten` inlines the tracing keys (`logLevel`, `logFile`) at the top level of the
    // settings object rather than under a nested `tracing` key.
    #[serde(flatten)]
    pub(crate) tracing: TracingSettings,
}
|
||||
|
||||
/// Settings needed to initialize tracing. These will only be
/// read from the global configuration.
#[derive(Debug, Deserialize, Default)]
#[cfg_attr(test, derive(PartialEq, Eq))]
#[serde(rename_all = "camelCase")]
pub(crate) struct TracingSettings {
    // Minimum level at which log records are emitted; `None` leaves the default in place.
    pub(crate) log_level: Option<crate::logging::LogLevel>,
    /// Path to the log file - tildes and environment variables are supported.
    pub(crate) log_file: Option<PathBuf>,
}
|
||||
|
||||
/// This is a direct representation of the workspace settings schema,
/// which inherits the schema of [`ClientSettings`] and adds extra fields
/// to describe the workspace it applies to.
#[derive(Debug, Deserialize)]
#[cfg_attr(test, derive(PartialEq, Eq))]
#[serde(rename_all = "camelCase")]
struct WorkspaceSettings {
    // The inherited client settings, flattened into the same JSON object.
    #[serde(flatten)]
    settings: ClientSettings,
    // URI of the workspace folder these settings apply to.
    workspace: Url,
}
|
||||
|
||||
/// This is the exact schema for initialization options sent in by the client
/// during initialization.
// NOTE: with `#[serde(untagged)]`, serde tries the variants in declaration order, so the
// more specific `HasWorkspaces` form must stay first; otherwise a payload with workspace
// settings could deserialize as `GlobalOnly` (whose `settings` field is defaulted).
#[derive(Debug, Deserialize)]
#[cfg_attr(test, derive(PartialEq, Eq))]
#[serde(untagged)]
enum InitializationOptions {
    // Global settings plus a per-workspace settings list (sent under the `settings` key).
    #[serde(rename_all = "camelCase")]
    HasWorkspaces {
        global_settings: ClientSettings,
        #[serde(rename = "settings")]
        workspace_settings: Vec<WorkspaceSettings>,
    },
    // Only global settings; `settings` may be omitted entirely.
    GlobalOnly {
        #[serde(default)]
        settings: ClientSettings,
    },
}
|
||||
|
||||
/// Built from the initialization options provided by the client.
#[derive(Debug)]
pub(crate) struct AllSettings {
    // Settings that apply session-wide (tracing configuration, etc.).
    pub(crate) global_settings: ClientSettings,
    /// If this is `None`, the client only passed in global settings.
    pub(crate) workspace_settings: Option<WorkspaceSettingsMap>,
}
|
||||
|
||||
impl AllSettings {
|
||||
/// Initializes the controller from the serialized initialization options.
|
||||
/// This fails if `options` are not valid initialization options.
|
||||
pub(crate) fn from_value(options: serde_json::Value) -> Self {
|
||||
Self::from_init_options(
|
||||
serde_json::from_value(options)
|
||||
.map_err(|err| {
|
||||
tracing::error!("Failed to deserialize initialization options: {err}. Falling back to default client settings...");
|
||||
show_err_msg!("Ruff received invalid client settings - falling back to default client settings.");
|
||||
})
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
}
|
||||
|
||||
fn from_init_options(options: InitializationOptions) -> Self {
|
||||
let (global_settings, workspace_settings) = match options {
|
||||
InitializationOptions::GlobalOnly { settings } => (settings, None),
|
||||
InitializationOptions::HasWorkspaces {
|
||||
global_settings,
|
||||
workspace_settings,
|
||||
} => (global_settings, Some(workspace_settings)),
|
||||
};
|
||||
|
||||
Self {
|
||||
global_settings,
|
||||
workspace_settings: workspace_settings.map(|workspace_settings| {
|
||||
workspace_settings
|
||||
.into_iter()
|
||||
.map(|settings| (settings.workspace, settings.settings))
|
||||
.collect()
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for InitializationOptions {
|
||||
fn default() -> Self {
|
||||
Self::GlobalOnly {
|
||||
settings: ClientSettings::default(),
|
||||
}
|
||||
}
|
||||
}
|
263
crates/ty_server/src/system.rs
Normal file
263
crates/ty_server/src/system.rs
Normal file
|
@ -0,0 +1,263 @@
|
|||
use std::any::Any;
|
||||
use std::fmt::Display;
|
||||
use std::sync::Arc;
|
||||
|
||||
use lsp_types::Url;
|
||||
use ruff_db::file_revision::FileRevision;
|
||||
use ruff_db::files::{File, FilePath};
|
||||
use ruff_db::system::walk_directory::WalkDirectoryBuilder;
|
||||
use ruff_db::system::{
|
||||
CaseSensitivity, DirectoryEntry, FileType, GlobError, Metadata, OsSystem, PatternError, Result,
|
||||
System, SystemPath, SystemPathBuf, SystemVirtualPath, SystemVirtualPathBuf,
|
||||
};
|
||||
use ruff_notebook::{Notebook, NotebookError};
|
||||
use ty_python_semantic::Db;
|
||||
|
||||
use crate::session::index::Index;
|
||||
use crate::DocumentQuery;
|
||||
|
||||
/// Converts the given [`Url`] to an [`AnySystemPath`].
|
||||
///
|
||||
/// If the URL scheme is `file`, then the path is converted to a [`SystemPathBuf`]. Otherwise, the
|
||||
/// URL is converted to a [`SystemVirtualPathBuf`].
|
||||
///
|
||||
/// This fails in the following cases:
|
||||
/// * The URL cannot be converted to a file path (refer to [`Url::to_file_path`]).
|
||||
/// * If the URL is not a valid UTF-8 string.
|
||||
pub(crate) fn url_to_any_system_path(url: &Url) -> std::result::Result<AnySystemPath, ()> {
|
||||
if url.scheme() == "file" {
|
||||
Ok(AnySystemPath::System(
|
||||
SystemPathBuf::from_path_buf(url.to_file_path()?).map_err(|_| ())?,
|
||||
))
|
||||
} else {
|
||||
Ok(AnySystemPath::SystemVirtual(
|
||||
SystemVirtualPath::new(url.as_str()).to_path_buf(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn file_to_url(db: &dyn Db, file: File) -> Option<Url> {
|
||||
match file.path(db) {
|
||||
FilePath::System(system) => Url::from_file_path(system.as_std_path()).ok(),
|
||||
FilePath::SystemVirtual(path) => Url::parse(path.as_str()).ok(),
|
||||
// TODO: Not yet supported, consider an approach similar to Sorbet's custom paths
|
||||
// https://sorbet.org/docs/sorbet-uris
|
||||
FilePath::Vendored(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents either a [`SystemPath`] or a [`SystemVirtualPath`].
#[derive(Debug)]
pub(crate) enum AnySystemPath {
    // A concrete path on the local file system.
    System(SystemPathBuf),
    // An editor-provided path with no file-system backing (non-`file` URL scheme).
    SystemVirtual(SystemVirtualPathBuf),
}
|
||||
|
||||
/// A [`System`] implementation that serves reads for open documents from the LSP index
/// and falls back to the local file system for everything else.
#[derive(Debug)]
pub(crate) struct LSPSystem {
    /// A read-only copy of the index where the server stores all the open documents and settings.
    ///
    /// This will be [`None`] when a mutable reference is held to the index via [`index_mut`]
    /// method to prevent the index from being accessed while it is being modified. It will be
    /// restored when the mutable reference is dropped.
    ///
    /// [`index_mut`]: crate::Session::index_mut
    index: Option<Arc<Index>>,

    /// A system implementation that uses the local file system.
    os_system: OsSystem,
}
|
||||
|
||||
impl LSPSystem {
|
||||
pub(crate) fn new(index: Arc<Index>) -> Self {
|
||||
let cwd = std::env::current_dir().unwrap();
|
||||
let os_system = OsSystem::new(SystemPathBuf::from_path_buf(cwd).unwrap());
|
||||
|
||||
Self {
|
||||
index: Some(index),
|
||||
os_system,
|
||||
}
|
||||
}
|
||||
|
||||
/// Takes the index out of the system.
|
||||
pub(crate) fn take_index(&mut self) -> Option<Arc<Index>> {
|
||||
self.index.take()
|
||||
}
|
||||
|
||||
/// Sets the index for the system.
|
||||
pub(crate) fn set_index(&mut self, index: Arc<Index>) {
|
||||
self.index = Some(index);
|
||||
}
|
||||
|
||||
/// Returns a reference to the contained index.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the index is `None`.
|
||||
fn index(&self) -> &Index {
|
||||
self.index.as_ref().unwrap()
|
||||
}
|
||||
|
||||
fn make_document_ref(&self, url: Url) -> Option<DocumentQuery> {
|
||||
let index = self.index();
|
||||
let key = index.key_from_url(url);
|
||||
index.make_document_ref(key)
|
||||
}
|
||||
|
||||
fn system_path_to_document_ref(&self, path: &SystemPath) -> Result<Option<DocumentQuery>> {
|
||||
let url = Url::from_file_path(path.as_std_path()).map_err(|()| {
|
||||
std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidInput,
|
||||
format!("Failed to convert system path to URL: {path:?}"),
|
||||
)
|
||||
})?;
|
||||
Ok(self.make_document_ref(url))
|
||||
}
|
||||
|
||||
fn system_virtual_path_to_document_ref(
|
||||
&self,
|
||||
path: &SystemVirtualPath,
|
||||
) -> Result<Option<DocumentQuery>> {
|
||||
let url = Url::parse(path.as_str()).map_err(|_| {
|
||||
std::io::Error::new(
|
||||
std::io::ErrorKind::InvalidInput,
|
||||
format!("Failed to convert virtual path to URL: {path:?}"),
|
||||
)
|
||||
})?;
|
||||
Ok(self.make_document_ref(url))
|
||||
}
|
||||
}
|
||||
|
||||
impl System for LSPSystem {
    /// Reports open documents as plain files stamped with their LSP revision; everything
    /// else is delegated to the OS system.
    fn path_metadata(&self, path: &SystemPath) -> Result<Metadata> {
        let document = self.system_path_to_document_ref(path)?;

        if let Some(document) = document {
            Ok(Metadata::new(
                document_revision(&document),
                None,
                FileType::File,
            ))
        } else {
            self.os_system.path_metadata(path)
        }
    }

    fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf> {
        self.os_system.canonicalize_path(path)
    }

    fn path_exists_case_sensitive(&self, path: &SystemPath, prefix: &SystemPath) -> bool {
        self.os_system.path_exists_case_sensitive(path, prefix)
    }

    /// Serves the in-memory contents for open text documents; falls back to disk otherwise
    /// (including for open notebooks, which are not plain text).
    fn read_to_string(&self, path: &SystemPath) -> Result<String> {
        let document = self.system_path_to_document_ref(path)?;

        match document {
            Some(DocumentQuery::Text { document, .. }) => Ok(document.contents().to_string()),
            _ => self.os_system.read_to_string(path),
        }
    }

    /// Builds a notebook from the open document (parsing text documents as notebook source),
    /// falling back to disk when the path isn't open.
    fn read_to_notebook(&self, path: &SystemPath) -> std::result::Result<Notebook, NotebookError> {
        let document = self.system_path_to_document_ref(path)?;

        match document {
            Some(DocumentQuery::Text { document, .. }) => {
                Notebook::from_source_code(document.contents())
            }
            Some(DocumentQuery::Notebook { notebook, .. }) => Ok(notebook.make_ruff_notebook()),
            None => self.os_system.read_to_notebook(path),
        }
    }

    /// Virtual paths only exist in the index: a missing document is an error, and only
    /// text documents can be read as strings.
    fn read_virtual_path_to_string(&self, path: &SystemVirtualPath) -> Result<String> {
        let document = self
            .system_virtual_path_to_document_ref(path)?
            .ok_or_else(|| virtual_path_not_found(path))?;

        if let DocumentQuery::Text { document, .. } = &document {
            Ok(document.contents().to_string())
        } else {
            Err(not_a_text_document(path))
        }
    }

    /// Like [`Self::read_virtual_path_to_string`] but produces a notebook; text documents
    /// are parsed as notebook source.
    fn read_virtual_path_to_notebook(
        &self,
        path: &SystemVirtualPath,
    ) -> std::result::Result<Notebook, NotebookError> {
        let document = self
            .system_virtual_path_to_document_ref(path)?
            .ok_or_else(|| virtual_path_not_found(path))?;

        match document {
            DocumentQuery::Text { document, .. } => Notebook::from_source_code(document.contents()),
            DocumentQuery::Notebook { notebook, .. } => Ok(notebook.make_ruff_notebook()),
        }
    }

    fn current_directory(&self) -> &SystemPath {
        self.os_system.current_directory()
    }

    fn user_config_directory(&self) -> Option<SystemPathBuf> {
        self.os_system.user_config_directory()
    }

    // Directory traversal and globbing are pure file-system concerns; delegate wholesale.
    fn read_directory<'a>(
        &'a self,
        path: &SystemPath,
    ) -> Result<Box<dyn Iterator<Item = Result<DirectoryEntry>> + 'a>> {
        self.os_system.read_directory(path)
    }

    fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder {
        self.os_system.walk_directory(path)
    }

    fn glob(
        &self,
        pattern: &str,
    ) -> std::result::Result<
        Box<dyn Iterator<Item = std::result::Result<SystemPathBuf, GlobError>>>,
        PatternError,
    > {
        self.os_system.glob(pattern)
    }

    // Downcasting hooks used by `Session::index_mut` to reach `take_index`/`set_index`.
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }

    fn case_sensitivity(&self) -> CaseSensitivity {
        self.os_system.case_sensitivity()
    }
}
|
||||
|
||||
/// Builds the `InvalidInput` I/O error returned when a document looked up by
/// `path` exists but is not a text document.
fn not_a_text_document(path: impl Display) -> std::io::Error {
    let message = format!("Input is not a text document: {path}");
    std::io::Error::new(std::io::ErrorKind::InvalidInput, message)
}
|
||||
|
||||
/// Builds the `NotFound` I/O error returned when no open document corresponds
/// to the virtual `path`.
fn virtual_path_not_found(path: impl Display) -> std::io::Error {
    let message = format!("Virtual path does not exist: {path}");
    std::io::Error::new(std::io::ErrorKind::NotFound, message)
}
|
||||
|
||||
/// Helper function to get the [`FileRevision`] of the given document.
|
||||
fn document_revision(document: &DocumentQuery) -> FileRevision {
|
||||
// The file revision is just an opaque number which doesn't have any significant meaning other
|
||||
// than that the file has changed if the revisions are different.
|
||||
#[allow(clippy::cast_sign_loss)]
|
||||
FileRevision::new(document.version() as u128)
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue