diff --git a/Cargo.lock b/Cargo.lock index d017be4..efc1695 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -482,6 +482,7 @@ dependencies = [ "salsa", "serde", "serde_json", + "tempfile", "tokio", "tower-lsp-server", "tracing", diff --git a/crates/djls-server/Cargo.toml b/crates/djls-server/Cargo.toml index e3f27f8..7829bf0 100644 --- a/crates/djls-server/Cargo.toml +++ b/crates/djls-server/Cargo.toml @@ -31,5 +31,8 @@ url = { workspace = true } [build-dependencies] djls-dev = { workspace = true } +[dev-dependencies] +tempfile = { workspace = true } + [lints] workspace = true diff --git a/crates/djls-server/src/lib.rs b/crates/djls-server/src/lib.rs index b601c7a..ba46830 100644 --- a/crates/djls-server/src/lib.rs +++ b/crates/djls-server/src/lib.rs @@ -1,8 +1,8 @@ mod client; mod logging; mod queue; -mod server; -mod session; +pub mod server; +pub mod session; use std::io::IsTerminal; @@ -10,7 +10,8 @@ use anyhow::Result; use tower_lsp_server::LspService; use tower_lsp_server::Server; -use crate::server::DjangoLanguageServer; +pub use crate::server::DjangoLanguageServer; +pub use crate::session::Session; pub fn run() -> Result<()> { if std::io::stdin().is_terminal() { diff --git a/crates/djls-server/src/session.rs b/crates/djls-server/src/session.rs index 6b4adbb..189a333 100644 --- a/crates/djls-server/src/session.rs +++ b/crates/djls-server/src/session.rs @@ -39,7 +39,7 @@ use djls_conf::Settings; use djls_project::DjangoProject; use djls_workspace::{ db::{Database, SourceFile}, - FileSystem, OsFileSystem, TextDocument, WorkspaceFileSystem, + Buffers, FileSystem, OsFileSystem, TextDocument, WorkspaceFileSystem, }; use percent_encoding::percent_decode_str; use salsa::{Setter, StorageHandle}; @@ -77,22 +77,23 @@ pub struct Session { /// LSP server settings settings: Settings, - /// Layer 1: Thread-safe overlay storage (Arc>) + /// Layer 1: Shared buffer storage for open documents /// /// This implements Ruff's two-layer architecture where Layer 1 contains - /// LSP overlays that take precedence over disk files. The overlays map - /// document URLs to TextDocuments containing current in-memory content. + /// open document buffers that take precedence over disk files. The buffers + /// are shared between Session (which manages them) and WorkspaceFileSystem + /// (which reads from them). /// /// Key properties: - /// - Thread-safe via Arc for Send+Sync requirements + /// - Thread-safe via the Buffers abstraction /// - Contains full TextDocument with content, version, and metadata /// - Never becomes Salsa inputs - only intercepted at read time - overlays: Arc>, + buffers: Buffers, - /// File system abstraction with overlay interception + /// File system abstraction with buffer interception /// - /// This LspFileSystem bridges Layer 1 (overlays) and Layer 2 (Salsa). - /// It intercepts FileSystem::read_to_string() calls to return overlay + /// This WorkspaceFileSystem bridges Layer 1 (buffers) and Layer 2 (Salsa). + /// It intercepts FileSystem::read_to_string() calls to return buffer /// content when available, falling back to disk otherwise. 
file_system: Arc, @@ -132,10 +133,10 @@ impl Session { (None, Settings::default()) }; - let overlays = Arc::new(DashMap::new()); + let buffers = Buffers::new(); let files = Arc::new(DashMap::new()); let file_system = Arc::new(WorkspaceFileSystem::new( - overlays.clone(), + buffers.clone(), Arc::new(OsFileSystem), )); let db_handle = Database::new(file_system.clone(), files.clone()) @@ -146,7 +147,7 @@ impl Session { Self { project, settings, - overlays, + buffers, file_system, files, client_capabilities: params.capabilities.clone(), @@ -230,32 +231,32 @@ impl Session { self.file_system.clone() } - /// Set or update an overlay for the given document URL + /// Set or update a buffer for the given document URL /// /// This implements Layer 1 of Ruff's architecture - storing in-memory /// document changes that take precedence over disk content. #[allow(dead_code)] // Used in tests pub fn set_overlay(&self, url: Url, document: TextDocument) { - self.overlays.insert(url, document); + self.buffers.open(url, document); } - /// Remove an overlay for the given document URL + /// Remove a buffer for the given document URL /// /// After removal, file reads will fall back to disk content. #[allow(dead_code)] // Used in tests pub fn remove_overlay(&self, url: &Url) -> Option { - self.overlays.remove(url).map(|(_, doc)| doc) + self.buffers.close(url) } - /// Check if an overlay exists for the given URL + /// Check if a buffer exists for the given URL #[allow(dead_code)] pub fn has_overlay(&self, url: &Url) -> bool { - self.overlays.contains_key(url) + self.buffers.contains(url) } - /// Get a copy of an overlay document + /// Get a copy of a buffered document pub fn get_overlay(&self, url: &Url) -> Option { - self.overlays.get(url).map(|doc| doc.clone()) + self.buffers.get(url) } /// Takes exclusive ownership of the database handle for mutation operations. @@ -375,41 +376,61 @@ impl Session { // These methods encapsulate the two-layer architecture coordination: // Layer 1 (overlays) and Layer 2 (Salsa revision tracking) - /// Handle opening a document - sets overlay and creates file. + /// Handle opening a document - sets buffer and creates file. 
/// /// This method coordinates both layers: - /// - Layer 1: Stores the document content in overlays + /// - Layer 1: Stores the document content in buffers /// - Layer 2: Creates the SourceFile in Salsa (if path is resolvable) pub fn open_document(&mut self, url: Url, document: TextDocument) { tracing::debug!("Opening document: {}", url); - // Layer 1: Set overlay - self.overlays.insert(url.clone(), document); + // Layer 1: Set buffer + self.buffers.open(url.clone(), document); - // Layer 2: Create file if needed (starts at revision 0) + // Layer 2: Create file and bump revision if it already exists + // This is crucial: if the file was already read from disk, we need to + // invalidate Salsa's cache so it re-reads through the buffer system if let Some(path) = self.url_to_path(&url) { self.with_db_mut(|db| { + // Check if file already exists (was previously read from disk) + let already_exists = db.has_file(&path); let file = db.get_or_create_file(path.clone()); - tracing::debug!( - "Created/retrieved SourceFile for {}: revision {}", - path.display(), - file.revision(db) - ); + + if already_exists { + // File was already read - bump revision to invalidate cache + let current_rev = file.revision(db); + let new_rev = current_rev + 1; + file.set_revision(db).to(new_rev); + tracing::debug!( + "Bumped revision for {} on open: {} -> {}", + path.display(), + current_rev, + new_rev + ); + + } else { + // New file - starts at revision 0 + tracing::debug!( + "Created new SourceFile for {}: revision {}", + path.display(), + file.revision(db) + ); + } }); } } - /// Handle document changes - updates overlay and bumps revision. + /// Handle document changes - updates buffer and bumps revision. /// /// This method coordinates both layers: - /// - Layer 1: Updates the document content in overlays + /// - Layer 1: Updates the document content in buffers /// - Layer 2: Bumps the file revision to trigger Salsa invalidation pub fn update_document(&mut self, url: Url, document: TextDocument) { let version = document.version(); tracing::debug!("Updating document: {} (version {})", url, version); - // Layer 1: Update overlay - self.overlays.insert(url.clone(), document); + // Layer 1: Update buffer + self.buffers.update(url.clone(), document); // Layer 2: Bump revision to trigger invalidation if let Some(path) = self.url_to_path(&url) { @@ -417,25 +438,25 @@ impl Session { } } - /// Handle closing a document - removes overlay and bumps revision. + /// Handle closing a document - removes buffer and bumps revision. /// /// This method coordinates both layers: - /// - Layer 1: Removes the overlay (falls back to disk) + /// - Layer 1: Removes the buffer (falls back to disk) /// - Layer 2: Bumps revision to trigger re-read from disk /// /// Returns the removed document if it existed. 
pub fn close_document(&mut self, url: &Url) -> Option { tracing::debug!("Closing document: {}", url); - // Layer 1: Remove overlay - let removed = self.overlays.remove(url).map(|(_, doc)| { + // Layer 1: Remove buffer + let removed = self.buffers.close(url); + if let Some(ref doc) = removed { tracing::debug!( - "Removed overlay for closed document: {} (was version {})", + "Removed buffer for closed document: {} (was version {})", url, doc.version() ); - doc - }); + } // Layer 2: Bump revision to trigger re-read from disk // We keep the file alive for potential re-opening @@ -512,10 +533,10 @@ impl Session { impl Default for Session { fn default() -> Self { - let overlays = Arc::new(DashMap::new()); + let buffers = Buffers::new(); let files = Arc::new(DashMap::new()); let file_system = Arc::new(WorkspaceFileSystem::new( - overlays.clone(), + buffers.clone(), Arc::new(OsFileSystem), )); let db_handle = Database::new(file_system.clone(), files.clone()) @@ -529,7 +550,7 @@ impl Default for Session { db_handle, file_system, files, - overlays, + buffers, client_capabilities: lsp_types::ClientCapabilities::default(), } } diff --git a/crates/djls-server/tests/lsp_integration.rs b/crates/djls-server/tests/lsp_integration.rs new file mode 100644 index 0000000..5c14607 --- /dev/null +++ b/crates/djls-server/tests/lsp_integration.rs @@ -0,0 +1,463 @@ +//! Integration tests for the LSP server's overlay → revision → invalidation flow +//! +//! These tests verify the complete two-layer architecture: +//! - Layer 1: LSP overlays (in-memory document state) +//! - Layer 2: Salsa database with revision tracking +//! +//! The tests ensure that document changes properly invalidate cached queries +//! and that overlays take precedence over disk content. + +use std::path::PathBuf; +use std::sync::Arc; + +use djls_server::DjangoLanguageServer; +use tempfile::TempDir; +use tower_lsp_server::lsp_types::{ + DidChangeTextDocumentParams, DidCloseTextDocumentParams, DidOpenTextDocumentParams, + InitializeParams, InitializedParams, TextDocumentContentChangeEvent, TextDocumentIdentifier, + TextDocumentItem, Uri, VersionedTextDocumentIdentifier, WorkspaceFolder, +}; +use tower_lsp_server::LanguageServer; +use url::Url; + +/// Test helper that manages an LSP server instance for testing +struct TestServer { + server: DjangoLanguageServer, + _temp_dir: TempDir, + workspace_root: PathBuf, +} + +impl TestServer { + /// Create a new test server with a temporary workspace + async fn new() -> Self { + // Create temporary directory for test workspace + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let workspace_root = temp_dir.path().to_path_buf(); + + // Set up logging + let (non_blocking, guard) = tracing_appender::non_blocking(std::io::sink()); + + // Create server (guard is moved into server, so we return it too) + let server = DjangoLanguageServer::new(guard); + + // Initialize the server + let workspace_folder = WorkspaceFolder { + uri: format!("file://{}", workspace_root.display()) + .parse() + .unwrap(), + name: "test_workspace".to_string(), + }; + + let init_params = InitializeParams { + workspace_folders: Some(vec![workspace_folder]), + ..Default::default() + }; + + server + .initialize(init_params) + .await + .expect("Failed to initialize"); + server.initialized(InitializedParams {}).await; + + Self { + server, + _temp_dir: temp_dir, + workspace_root, + } + } + + /// Helper to create a file path in the test workspace + fn workspace_file(&self, name: &str) -> PathBuf { + 
self.workspace_root.join(name) + } + + /// Helper to create a file URL in the test workspace + fn workspace_url(&self, name: &str) -> Url { + Url::from_file_path(self.workspace_file(name)).unwrap() + } + + /// Open a document in the LSP server + async fn open_document(&self, file_name: &str, content: &str, version: i32) { + let params = DidOpenTextDocumentParams { + text_document: TextDocumentItem { + uri: self.workspace_url(file_name).to_string().parse().unwrap(), + language_id: if file_name.ends_with(".html") { + "html".to_string() + } else if file_name.ends_with(".py") { + "python".to_string() + } else { + "plaintext".to_string() + }, + version, + text: content.to_string(), + }, + }; + + self.server.did_open(params).await; + } + + /// Change a document in the LSP server + async fn change_document(&self, file_name: &str, new_content: &str, version: i32) { + let params = DidChangeTextDocumentParams { + text_document: VersionedTextDocumentIdentifier { + uri: self.workspace_url(file_name).to_string().parse().unwrap(), + version, + }, + content_changes: vec![TextDocumentContentChangeEvent { + range: None, + range_length: None, + text: new_content.to_string(), + }], + }; + + self.server.did_change(params).await; + } + + /// Close a document in the LSP server + async fn close_document(&self, file_name: &str) { + let params = DidCloseTextDocumentParams { + text_document: TextDocumentIdentifier { + uri: self.workspace_url(file_name).to_string().parse().unwrap(), + }, + }; + + self.server.did_close(params).await; + } + + /// Get the content of a file through the session's query system + async fn get_file_content(&self, file_name: &str) -> String { + let path = self.workspace_file(file_name); + self.server + .with_session_mut(|session| session.file_content(path)) + .await + } + + /// Write a file to disk in the test workspace + fn write_file(&self, file_name: &str, content: &str) { + let path = self.workspace_file(file_name); + std::fs::write(path, content).expect("Failed to write test file"); + } + + /// Check if a file has an overlay in the session + async fn has_overlay(&self, file_name: &str) -> bool { + let url = self.workspace_url(file_name); + self.server + .with_session(|session| session.get_overlay(&url).is_some()) + .await + } + + /// Get the revision of a file + async fn get_file_revision(&self, file_name: &str) -> Option { + let path = self.workspace_file(file_name); + self.server + .with_session_mut(|session| session.file_revision(&path)) + .await + } +} + +#[tokio::test] +async fn test_full_lsp_lifecycle() { + let server = TestServer::new().await; + let file_name = "test.html"; + + // Write initial content to disk + server.write_file(file_name, "
Disk Content
"); + + // 1. Test did_open creates overlay and file + server + .open_document(file_name, "
Overlay Content
", 1) + .await; + + // Verify overlay exists + assert!(server.has_overlay(file_name).await); + + // Verify overlay content is returned (not disk content) + let content = server.get_file_content(file_name).await; + assert_eq!(content, "
Overlay Content
"); + + // Verify file was created with revision 0 + let revision = server.get_file_revision(file_name).await; + assert_eq!(revision, Some(0)); + + // 2. Test did_change updates overlay and bumps revision + server + .change_document(file_name, "
Updated Content
", 2) + .await; + + // Verify content changed + let content = server.get_file_content(file_name).await; + assert_eq!(content, "
Updated Content
"); + + // Verify revision was bumped + let revision = server.get_file_revision(file_name).await; + assert_eq!(revision, Some(1)); + + // 3. Test did_close removes overlay and bumps revision + server.close_document(file_name).await; + + // Verify overlay is removed + assert!(!server.has_overlay(file_name).await); + + // Verify content now comes from disk + let content = server.get_file_content(file_name).await; + assert_eq!(content, "
Disk Content
"); + + // Verify revision was bumped again + let revision = server.get_file_revision(file_name).await; + assert_eq!(revision, Some(2)); +} + +#[tokio::test] +async fn test_overlay_precedence() { + let server = TestServer::new().await; + let file_name = "template.html"; + + // Write content to disk + server.write_file(file_name, "{% block content %}Disk{% endblock %}"); + + // Read content before overlay - should get disk content + let content = server.get_file_content(file_name).await; + assert_eq!(content, "{% block content %}Disk{% endblock %}"); + + // Open document with different content + server + .open_document(file_name, "{% block content %}Overlay{% endblock %}", 1) + .await; + + // Verify overlay content takes precedence + let content = server.get_file_content(file_name).await; + assert_eq!(content, "{% block content %}Overlay{% endblock %}"); + + // Close document + server.close_document(file_name).await; + + // Verify we're back to disk content + let content = server.get_file_content(file_name).await; + assert_eq!(content, "{% block content %}Disk{% endblock %}"); +} + +#[tokio::test] +async fn test_template_parsing_with_overlays() { + let server = TestServer::new().await; + let file_name = "template.html"; + + // Write initial template to disk + server.write_file(file_name, "{% if true %}Original{% endif %}"); + + // Open with different template content + server + .open_document( + file_name, + "{% for item in items %}{{ item }}{% endfor %}", + 1, + ) + .await; + use djls_workspace::db::parse_template; + + // Parse template through the session + let workspace_path = server.workspace_file(file_name); + let ast = server + .server + .with_session_mut(|session| { + session.with_db_mut(|db| { + let file = db.get_or_create_file(workspace_path); + parse_template(db, file) + }) + }) + .await; + + // Verify we parsed the overlay content (for loop), not disk content (if statement) + assert!(ast.is_some()); + let ast = ast.unwrap(); + let ast_str = format!("{:?}", ast.ast); + assert!(ast_str.contains("for") || ast_str.contains("For")); + assert!(!ast_str.contains("if") && !ast_str.contains("If")); +} + +#[tokio::test] +async fn test_multiple_documents_independent() { + let server = TestServer::new().await; + + // Open multiple documents + server.open_document("file1.html", "Content 1", 1).await; + server.open_document("file2.html", "Content 2", 1).await; + server.open_document("file3.html", "Content 3", 1).await; + + // Verify all have overlays + assert!(server.has_overlay("file1.html").await); + assert!(server.has_overlay("file2.html").await); + assert!(server.has_overlay("file3.html").await); + + // Change one document + server.change_document("file2.html", "Updated 2", 2).await; + + // Verify only file2 was updated + assert_eq!(server.get_file_content("file1.html").await, "Content 1"); + assert_eq!(server.get_file_content("file2.html").await, "Updated 2"); + assert_eq!(server.get_file_content("file3.html").await, "Content 3"); + + // Verify revision changes + assert_eq!(server.get_file_revision("file1.html").await, Some(0)); + assert_eq!(server.get_file_revision("file2.html").await, Some(1)); + assert_eq!(server.get_file_revision("file3.html").await, Some(0)); +} + +#[tokio::test] +async fn test_concurrent_overlay_updates() { + let server = Arc::new(TestServer::new().await); + + // Open initial documents + for i in 0..5 { + server + .open_document(&format!("file{}.html", i), &format!("Initial {}", i), 1) + .await; + } + + // Spawn concurrent tasks to update different documents + let 
mut handles = vec![]; + + for i in 0..5 { + let server_clone = Arc::clone(&server); + let handle = tokio::spawn(async move { + // Each task updates its document multiple times + for version in 2..10 { + server_clone + .change_document( + &format!("file{}.html", i), + &format!("Updated {} v{}", i, version), + version, + ) + .await; + + // Small delay to encourage interleaving + tokio::time::sleep(tokio::time::Duration::from_millis(1)).await; + } + }); + handles.push(handle); + } + + // Wait for all tasks to complete + for handle in handles { + handle.await.expect("Task failed"); + } + + // Verify final state of all documents + for i in 0..5 { + let content = server.get_file_content(&format!("file{}.html", i)).await; + assert_eq!(content, format!("Updated {} v9", i)); + + // Each document should have had 8 changes (versions 2-9) + let revision = server.get_file_revision(&format!("file{}.html", i)).await; + assert_eq!(revision, Some(8)); + } +} + +#[tokio::test] +async fn test_caching_behavior() { + let server = TestServer::new().await; + + // Open three template files + server + .open_document("template1.html", "{% block a %}1{% endblock %}", 1) + .await; + server + .open_document("template2.html", "{% block b %}2{% endblock %}", 1) + .await; + server + .open_document("template3.html", "{% block c %}3{% endblock %}", 1) + .await; + + // Parse all templates once to populate cache + for i in 1..=3 { + let _ = server + .get_file_content(&format!("template{}.html", i)) + .await; + } + + // Store initial revisions + let rev1_before = server.get_file_revision("template1.html").await.unwrap(); + let rev2_before = server.get_file_revision("template2.html").await.unwrap(); + let rev3_before = server.get_file_revision("template3.html").await.unwrap(); + + // Change only template2 + server + .change_document("template2.html", "{% block b %}CHANGED{% endblock %}", 2) + .await; + + // Verify only template2's revision changed + let rev1_after = server.get_file_revision("template1.html").await.unwrap(); + let rev2_after = server.get_file_revision("template2.html").await.unwrap(); + let rev3_after = server.get_file_revision("template3.html").await.unwrap(); + + assert_eq!( + rev1_before, rev1_after, + "template1 revision should not change" + ); + assert_eq!( + rev2_before + 1, + rev2_after, + "template2 revision should increment" + ); + assert_eq!( + rev3_before, rev3_after, + "template3 revision should not change" + ); + + // Verify content + assert_eq!( + server.get_file_content("template1.html").await, + "{% block a %}1{% endblock %}" + ); + assert_eq!( + server.get_file_content("template2.html").await, + "{% block b %}CHANGED{% endblock %}" + ); + assert_eq!( + server.get_file_content("template3.html").await, + "{% block c %}3{% endblock %}" + ); +} + +#[tokio::test] +async fn test_revision_tracking_across_lifecycle() { + let server = TestServer::new().await; + let file_name = "tracked.html"; + + // Create file on disk + server.write_file(file_name, "Initial"); + + // Open document - should create file with revision 0 + server.open_document(file_name, "Opened", 1).await; + assert_eq!(server.get_file_revision(file_name).await, Some(0)); + + // Change document multiple times + for i in 2..=5 { + server + .change_document(file_name, &format!("Change {}", i), i) + .await; + assert_eq!( + server.get_file_revision(file_name).await, + Some((i - 1) as u64), + "Revision should be {} after change {}", + i - 1, + i + ); + } + + // Close document - should bump revision one more time + 
server.close_document(file_name).await; + assert_eq!(server.get_file_revision(file_name).await, Some(5)); + + // Re-open document - file already exists, should bump revision to invalidate cache + server.open_document(file_name, "Reopened", 10).await; + assert_eq!( + server.get_file_revision(file_name).await, + Some(6), + "Revision should bump on re-open to invalidate cache" + ); + + // Change again + server.change_document(file_name, "Final", 11).await; + assert_eq!(server.get_file_revision(file_name).await, Some(7)); +} + diff --git a/crates/djls-workspace/src/buffers.rs b/crates/djls-workspace/src/buffers.rs new file mode 100644 index 0000000..702220a --- /dev/null +++ b/crates/djls-workspace/src/buffers.rs @@ -0,0 +1,68 @@ +//! Shared buffer storage for open documents +//! +//! This module provides the `Buffers` type which represents the in-memory +//! content of open files. These buffers are shared between the Session +//! (which manages document lifecycle) and the WorkspaceFileSystem (which +//! reads from them). + +use dashmap::DashMap; +use std::sync::Arc; +use url::Url; + +use crate::document::TextDocument; + +/// Shared buffer storage between Session and FileSystem +/// +/// Buffers represent the in-memory content of open files that takes +/// precedence over disk content when reading through the FileSystem. +/// This is the key abstraction that makes the sharing between Session +/// and WorkspaceFileSystem explicit and type-safe. +#[derive(Clone, Debug)] +pub struct Buffers { + inner: Arc>, +} + +impl Buffers { + /// Create a new empty buffer storage + pub fn new() -> Self { + Self { + inner: Arc::new(DashMap::new()), + } + } + + /// Open a document in the buffers + pub fn open(&self, url: Url, document: TextDocument) { + self.inner.insert(url, document); + } + + /// Update an open document + pub fn update(&self, url: Url, document: TextDocument) { + self.inner.insert(url, document); + } + + /// Close a document and return it if it was open + pub fn close(&self, url: &Url) -> Option { + self.inner.remove(url).map(|(_, doc)| doc) + } + + /// Get a document if it's open + pub fn get(&self, url: &Url) -> Option { + self.inner.get(url).map(|entry| entry.clone()) + } + + /// Check if a document is open + pub fn contains(&self, url: &Url) -> bool { + self.inner.contains_key(url) + } + + /// Iterate over all open buffers (for debugging) + pub fn iter(&self) -> impl Iterator + '_ { + self.inner.iter().map(|entry| (entry.key().clone(), entry.value().clone())) + } +} + +impl Default for Buffers { + fn default() -> Self { + Self::new() + } +} \ No newline at end of file diff --git a/crates/djls-workspace/src/db.rs b/crates/djls-workspace/src/db.rs index 8a9220f..3fba217 100644 --- a/crates/djls-workspace/src/db.rs +++ b/crates/djls-workspace/src/db.rs @@ -398,6 +398,7 @@ pub fn template_errors(db: &dyn Db, file: SourceFile) -> Arc<[String]> { #[cfg(test)] mod tests { use super::*; + use crate::buffers::Buffers; use crate::document::TextDocument; use crate::fs::WorkspaceFileSystem; use crate::language::LanguageId; @@ -464,11 +465,11 @@ mod tests { ); // Create overlay storage - let overlays = Arc::new(DashMap::new()); + let buffers = Buffers::new(); // Create WorkspaceFileSystem that checks overlays first let file_system = Arc::new(WorkspaceFileSystem::new( - overlays.clone(), + buffers.clone(), Arc::new(memory_fs), )); @@ -490,7 +491,7 @@ mod tests { 2, LanguageId::Other, ); - overlays.insert(url, updated_document); + buffers.open(url, updated_document); // Bump the file revision to trigger 
re-parse file.set_revision(&mut db).to(1); @@ -520,11 +521,11 @@ mod tests { ); // Create overlay storage - let overlays = Arc::new(DashMap::new()); + let buffers = Buffers::new(); // Create WorkspaceFileSystem let file_system = Arc::new(WorkspaceFileSystem::new( - overlays.clone(), + buffers.clone(), Arc::new(memory_fs), )); @@ -549,7 +550,7 @@ mod tests { 2, LanguageId::Other, ); - overlays.insert(url, updated_document); + buffers.open(url, updated_document); // Bump revision to trigger invalidation file.set_revision(&mut db).to(1); diff --git a/crates/djls-workspace/src/fs.rs b/crates/djls-workspace/src/fs.rs index 26603d4..151c6d6 100644 --- a/crates/djls-workspace/src/fs.rs +++ b/crates/djls-workspace/src/fs.rs @@ -3,13 +3,12 @@ //! This module provides the `FileSystem` trait that abstracts file I/O operations. //! This allows the LSP to work with both real files and in-memory overlays. -use dashmap::DashMap; use std::io; use std::path::Path; use std::sync::Arc; use url::Url; -use crate::document::TextDocument; +use crate::buffers::Buffers; /// Trait for file system operations /// @@ -66,43 +65,43 @@ impl FileSystem for OsFileSystem { } } -/// LSP file system that intercepts reads for overlay files +/// LSP file system that intercepts reads for buffered files /// -/// This implements Ruff's two-layer architecture where Layer 1 (LSP overlays) +/// This implements Ruff's two-layer architecture where Layer 1 (open buffers) /// takes precedence over Layer 2 (Salsa database). When a file is read, -/// this system first checks for an overlay (in-memory changes) and returns -/// that content. If no overlay exists, it falls back to reading from disk. +/// this system first checks for a buffer (in-memory content) and returns +/// that content. If no buffer exists, it falls back to reading from disk. 
pub struct WorkspaceFileSystem { - /// In-memory overlays that take precedence over disk files - /// Maps URL to `TextDocument` containing current content - buffers: Arc>, + /// In-memory buffers that take precedence over disk files + buffers: Buffers, /// Fallback file system for disk operations disk: Arc, } impl WorkspaceFileSystem { - /// Create a new [`LspFileSystem`] with the given overlay storage and fallback - pub fn new(buffers: Arc>, disk: Arc) -> Self { + /// Create a new [`WorkspaceFileSystem`] with the given buffer storage and fallback + pub fn new(buffers: Buffers, disk: Arc) -> Self { Self { buffers, disk } } } impl FileSystem for WorkspaceFileSystem { fn read_to_string(&self, path: &Path) -> io::Result { - if let Some(document) = path_to_url(path).and_then(|url| self.buffers.get(&url)) { - Ok(document.content().to_string()) - } else { - self.disk.read_to_string(path) + if let Some(url) = path_to_url(path) { + if let Some(document) = self.buffers.get(&url) { + return Ok(document.content().to_string()); + } } + self.disk.read_to_string(path) } fn exists(&self, path: &Path) -> bool { - path_to_url(path).is_some_and(|url| self.buffers.contains_key(&url)) + path_to_url(path).is_some_and(|url| self.buffers.contains(&url)) || self.disk.exists(path) } fn is_file(&self, path: &Path) -> bool { - path_to_url(path).is_some_and(|url| self.buffers.contains_key(&url)) + path_to_url(path).is_some_and(|url| self.buffers.contains(&url)) || self.disk.is_file(path) } @@ -128,22 +127,24 @@ impl FileSystem for WorkspaceFileSystem { /// This is a simplified conversion - in a full implementation, /// you might want more robust path-to-URL conversion fn path_to_url(path: &Path) -> Option { - if let Ok(absolute_path) = std::fs::canonicalize(path) { - return Url::from_file_path(absolute_path).ok(); - } - - // For test scenarios where the file doesn't exist on disk, - // try to create URL from the path directly if it's absolute + // For absolute paths, use them directly without canonicalization + // This ensures consistency with how URLs are created when storing overlays if path.is_absolute() { return Url::from_file_path(path).ok(); } + // Only try to canonicalize for relative paths + if let Ok(absolute_path) = std::fs::canonicalize(path) { + return Url::from_file_path(absolute_path).ok(); + } + None } #[cfg(test)] mod tests { use super::*; + use crate::buffers::Buffers; use crate::document::TextDocument; use crate::language::LanguageId; @@ -207,22 +208,22 @@ mod tests { "original content".to_string(), ); - // Create overlay storage - let overlays = Arc::new(DashMap::new()); + // Create buffer storage + let buffers = Buffers::new(); // Create LspFileSystem with memory fallback - let lsp_fs = WorkspaceFileSystem::new(overlays.clone(), Arc::new(memory_fs)); + let lsp_fs = WorkspaceFileSystem::new(buffers.clone(), Arc::new(memory_fs)); - // Before adding overlay, should read from fallback + // Before adding buffer, should read from fallback let path = std::path::Path::new("/test/file.py"); assert_eq!(lsp_fs.read_to_string(path).unwrap(), "original content"); - // Add overlay - this simulates having an open document with changes + // Add buffer - this simulates having an open document with changes let url = Url::from_file_path("/test/file.py").unwrap(); let document = TextDocument::new("overlay content".to_string(), 1, LanguageId::Python); - overlays.insert(url, document); + buffers.open(url, document); - // Now should read from overlay + // Now should read from buffer 
assert_eq!(lsp_fs.read_to_string(path).unwrap(), "overlay content"); } @@ -235,13 +236,13 @@ mod tests { "disk content".to_string(), ); - // Create empty overlay storage - let overlays = Arc::new(DashMap::new()); + // Create empty buffer storage + let buffers = Buffers::new(); // Create LspFileSystem - let lsp_fs = WorkspaceFileSystem::new(overlays, Arc::new(memory_fs)); + let lsp_fs = WorkspaceFileSystem::new(buffers, Arc::new(memory_fs)); - // Should fall back to disk when no overlay exists + // Should fall back to disk when no buffer exists let path = std::path::Path::new("/test/file.py"); assert_eq!(lsp_fs.read_to_string(path).unwrap(), "disk content"); } @@ -256,8 +257,8 @@ mod tests { ); // Create LspFileSystem - let overlays = Arc::new(DashMap::new()); - let lsp_fs = WorkspaceFileSystem::new(overlays, Arc::new(memory_fs)); + let buffers = Buffers::new(); + let lsp_fs = WorkspaceFileSystem::new(buffers, Arc::new(memory_fs)); let path = std::path::Path::new("/test/file.py"); diff --git a/crates/djls-workspace/src/lib.rs b/crates/djls-workspace/src/lib.rs index 22e0faf..a3a8d01 100644 --- a/crates/djls-workspace/src/lib.rs +++ b/crates/djls-workspace/src/lib.rs @@ -1,9 +1,11 @@ +mod buffers; pub mod db; mod document; mod fs; mod language; mod template; +pub use buffers::Buffers; pub use db::Database; pub use document::TextDocument; pub use fs::{FileSystem, OsFileSystem, WorkspaceFileSystem};
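
For reference, a minimal sketch of how the new `Buffers` handle is shared between the session side and `WorkspaceFileSystem`, written as if it were another unit test inside `djls-workspace` (mirroring the existing `fs.rs` tests so that the crate-private `LanguageId` is reachable). The temp-file path and test name are illustrative only; everything else uses APIs introduced in this diff:

```rust
use std::sync::Arc;

use url::Url;

use crate::buffers::Buffers;
use crate::document::TextDocument;
use crate::fs::{FileSystem, OsFileSystem, WorkspaceFileSystem};
use crate::language::LanguageId;

#[test]
fn buffer_precedence_sketch() {
    // Hypothetical on-disk file used only for this sketch.
    let path = std::env::temp_dir().join("djls_buffers_sketch.html");
    std::fs::write(&path, "disk content").unwrap();

    // One Buffers handle is cloned into the file system; the other stays
    // with the session side that manages document lifecycle.
    let buffers = Buffers::new();
    let fs = WorkspaceFileSystem::new(buffers.clone(), Arc::new(OsFileSystem));

    // No buffer yet: reads fall through to disk.
    assert_eq!(fs.read_to_string(&path).unwrap(), "disk content");

    // Opening a document stores it in the shared buffers...
    let url = Url::from_file_path(&path).unwrap();
    buffers.open(
        url.clone(),
        TextDocument::new("edited content".to_string(), 1, LanguageId::Other),
    );

    // ...and the same read now returns the buffered content.
    assert_eq!(fs.read_to_string(&path).unwrap(), "edited content");

    // Closing the document falls back to disk again.
    let _ = buffers.close(&url);
    assert_eq!(fs.read_to_string(&path).unwrap(), "disk content");

    let _ = std::fs::remove_file(&path);
}
```

This is the same precedence behavior the session-level integration tests exercise end to end; the sketch only isolates the `Buffers`/`WorkspaceFileSystem` seam without Salsa or the LSP server in the loop.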