This commit is contained in:
Josh Thomas 2025-08-25 10:14:43 -05:00
parent b6bc1664ac
commit fb768a86d5
7 changed files with 551 additions and 9 deletions

105
Cargo.lock generated
View file

@ -510,7 +510,10 @@ dependencies = [
"camino",
"dashmap",
"djls-templates",
"notify",
"salsa",
"tempfile",
"tokio",
"url",
]
@ -606,6 +609,15 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "fsevent-sys"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2"
dependencies = [
"libc",
]
[[package]]
name = "futures"
version = "0.3.31"
@ -883,6 +895,26 @@ version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd"
[[package]]
name = "inotify"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3"
dependencies = [
"bitflags 2.9.2",
"inotify-sys",
"libc",
]
[[package]]
name = "inotify-sys"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
dependencies = [
"libc",
]
[[package]]
name = "insta"
version = "1.43.1"
@ -938,6 +970,26 @@ dependencies = [
"serde",
]
[[package]]
name = "kqueue"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a"
dependencies = [
"kqueue-sys",
"libc",
]
[[package]]
name = "kqueue-sys"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b"
dependencies = [
"bitflags 1.3.2",
"libc",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
@ -1041,10 +1093,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
dependencies = [
"libc",
"log",
"wasi 0.11.1+wasi-snapshot-preview1",
"windows-sys 0.59.0",
]
[[package]]
name = "notify"
version = "8.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3"
dependencies = [
"bitflags 2.9.2",
"fsevent-sys",
"inotify",
"kqueue",
"libc",
"log",
"mio",
"notify-types",
"walkdir",
"windows-sys 0.60.2",
]
[[package]]
name = "notify-types"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d"
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
@ -1493,6 +1570,15 @@ dependencies = [
"synstructure",
]
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
@ -2074,6 +2160,16 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.1+wasi-snapshot-preview1"
@ -2116,6 +2212,15 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22"
dependencies = [
"windows-sys 0.60.2",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"

View file

@ -23,6 +23,7 @@ clap = { version = "4.5", features = ["derive"] }
config = { version ="0.15", features = ["toml"] }
dashmap = "6.1"
directories = "6.0"
notify = "8.2"
percent-encoding = "2.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

View file

@ -1,5 +1,5 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use anyhow::anyhow;
use anyhow::Result;
@ -26,7 +26,7 @@ use super::document::{ClosingBrace, LanguageId, LineIndex, TextDocument};
pub struct Store {
vfs: Arc<Vfs>,
file_store: FileStore,
file_store: Arc<Mutex<FileStore>>,
file_ids: HashMap<String, FileId>,
line_indices: HashMap<FileId, LineIndex>,
versions: HashMap<String, i32>,
@ -37,7 +37,7 @@ impl Default for Store {
fn default() -> Self {
Self {
vfs: Arc::new(Vfs::default()),
file_store: FileStore::new(),
file_store: Arc::new(Mutex::new(FileStore::new())),
file_ids: HashMap::new(),
line_indices: HashMap::new(),
versions: HashMap::new(),
@ -71,7 +71,8 @@ impl Store {
// Sync VFS snapshot to FileStore for Salsa tracking
let snapshot = self.vfs.snapshot();
self.file_store.apply_vfs_snapshot(&snapshot);
let mut file_store = self.file_store.lock().unwrap();
file_store.apply_vfs_snapshot(&snapshot);
// Create TextDocument metadata
let document = TextDocument::new(uri_str.clone(), version, language_id.clone(), file_id);
@ -122,7 +123,8 @@ impl Store {
// Sync VFS snapshot to FileStore for Salsa tracking
let snapshot = self.vfs.snapshot();
self.file_store.apply_vfs_snapshot(&snapshot);
let mut file_store = self.file_store.lock().unwrap();
file_store.apply_vfs_snapshot(&snapshot);
// Update cached line index and version
self.line_indices
@ -190,7 +192,8 @@ impl Store {
// Try to get cached AST from FileStore for better context analysis
// This demonstrates using the cached AST, though we still fall back to string parsing
let file_id = document.file_id();
if let Some(_ast) = self.file_store.get_template_ast(file_id) {
let file_store = self.file_store.lock().unwrap();
if let Some(_ast) = file_store.get_template_ast(file_id) {
// TODO: In a future enhancement, we could use the AST to provide
// more intelligent completions based on the current node context
// For now, we continue with the existing string-based approach
@ -258,7 +261,8 @@ impl Store {
};
// Get cached template errors from FileStore
let errors = self.file_store.get_template_errors(file_id);
let file_store = self.file_store.lock().unwrap();
let errors = file_store.get_template_errors(file_id);
// Convert template errors to LSP diagnostics
errors

View file

@ -9,10 +9,13 @@ djls-templates = { workspace = true }
anyhow = { workspace = true }
camino = { workspace = true }
dashmap = { workspace = true }
notify = { workspace = true }
salsa = { workspace = true }
tokio = { workspace = true }
url = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }
[lints]
workspace = true

View file

@ -1,11 +1,13 @@
mod bridge;
mod db;
mod vfs;
mod watcher;
// Re-export public API
pub use bridge::FileStore;
pub use db::{parse_template, template_errors, Database, FileKindMini, SourceFile, TemplateAst, TemplateLoaderOrder};
pub use vfs::{FileKind, FileMeta, FileRecord, Revision, TextSource, Vfs, VfsSnapshot};
pub use watcher::{VfsWatcher, WatchConfig, WatchEvent};
/// Stable, compact identifier for files across the subsystem.
///

View file

@ -18,7 +18,7 @@ use std::{
};
use url::Url;
use super::FileId;
use super::{FileId, watcher::{VfsWatcher, WatchConfig, WatchEvent}};
/// Monotonic counter representing global VFS state.
///
@ -113,6 +113,10 @@ pub struct Vfs {
files: DashMap<FileId, FileRecord>,
/// Global revision counter, incremented on content changes
head: AtomicU64,
/// Optional file system watcher for external change detection
watcher: std::sync::Mutex<Option<VfsWatcher>>,
/// Map from filesystem path to FileId for watcher events
by_path: DashMap<Utf8PathBuf, FileId>,
}
impl Vfs {
@ -134,11 +138,12 @@ impl Vfs {
let id = FileId(self.next_file_id.fetch_add(1, Ordering::SeqCst));
let meta = FileMeta {
uri: uri.clone(),
path,
path: path.clone(),
kind,
};
let hash = content_hash(&text);
self.by_uri.insert(uri, id);
self.by_path.insert(path, id);
self.files.insert(id, FileRecord { meta, text, hash });
id
}
@ -188,6 +193,96 @@ impl Vfs {
.collect(),
}
}
/// Enable file system watching with the given configuration.
///
/// Starts monitoring the root directories named in `config` for external
/// changes. Any previously installed watcher is dropped and replaced.
///
/// # Errors
///
/// Fails when watching is disabled in `config` or the watcher cannot start.
pub fn enable_file_watching(&self, config: WatchConfig) -> Result<()> {
    let watcher = VfsWatcher::new(config)?;
    // `replace` drops any earlier watcher, stopping its notification stream.
    self.watcher.lock().unwrap().replace(watcher);
    Ok(())
}
/// Process pending file system events from the watcher.
///
/// This should be called periodically to sync external file changes into the VFS.
/// Returns the number of files that were updated — more precisely, the number
/// of create/modify/rename events that were loaded from disk without error
/// (a reload whose content turns out unchanged still counts).
pub fn process_file_events(&self) -> usize {
    // Get events from the watcher. The lock is confined to this block so it
    // is released before `load_from_disk` runs below — the watcher stays
    // available to other callers while we do disk I/O.
    let events = {
        let guard = self.watcher.lock().unwrap();
        if let Some(watcher) = guard.as_ref() {
            // Non-blocking; empty Vec when nothing is pending.
            watcher.try_recv_events()
        } else {
            // File watching was never enabled: nothing to process.
            return 0;
        }
    };

    let mut updated_count = 0;

    for event in events {
        match event {
            // Creation and modification are handled identically: (re)read
            // the file from disk and update the VFS entry.
            WatchEvent::Modified(path) | WatchEvent::Created(path) => {
                if let Err(e) = self.load_from_disk(&path) {
                    eprintln!("Failed to load file from disk: {}: {}", path, e);
                } else {
                    updated_count += 1;
                }
            }
            WatchEvent::Deleted(path) => {
                // For now, we don't remove deleted files from VFS
                // This maintains stable FileIds for consumers
                eprintln!("File deleted (keeping in VFS): {}", path);
            }
            WatchEvent::Renamed { from, to } => {
                // Handle rename by updating the path mapping. The FileId is
                // carried over so downstream references remain valid; renames
                // of paths the VFS never tracked are silently ignored.
                if let Some(file_id) = self.by_path.remove(&from).map(|(_, id)| id) {
                    self.by_path.insert(to.clone(), file_id);
                    if let Err(e) = self.load_from_disk(&to) {
                        eprintln!("Failed to load renamed file: {}: {}", to, e);
                    } else {
                        updated_count += 1;
                    }
                }
            }
        }
    }

    updated_count
}
/// Load a file's content from disk and update the VFS.
///
/// This method reads the file from the filesystem and updates the VFS entry
/// if the content has changed. It's used by the file watcher to sync external changes.
///
/// Paths that were never interned in the VFS return `Ok` without doing
/// anything; only a failed disk read produces an error.
fn load_from_disk(&self, path: &Utf8PathBuf) -> Result<()> {
    use std::fs;

    // Check if we have this file tracked. The FileId is copied out of the
    // guard so the DashMap read lock on `by_path` is dropped immediately.
    if let Some(file_id) = self.by_path.get(path).map(|entry| *entry.value()) {
        // Read content from disk
        let content = fs::read_to_string(path.as_std_path())
            .map_err(|e| anyhow!("Failed to read file {}: {}", path, e))?;

        let new_text = TextSource::Disk(Arc::from(content.as_str()));
        let new_hash = content_hash(&new_text);

        // Update the file only if content changed: the hash comparison makes
        // this idempotent and keeps the revision counter from churning on
        // spurious watcher notifications.
        if let Some(mut record) = self.files.get_mut(&file_id) {
            if record.hash != new_hash {
                record.text = new_text;
                record.hash = new_hash;
                // Bump the global revision so consumers can detect the change.
                self.head.fetch_add(1, Ordering::SeqCst);
            }
        }
    }

    Ok(())
}
/// Report whether a file system watcher is currently installed.
pub fn is_file_watching_enabled(&self) -> bool {
    let guard = self.watcher.lock().unwrap();
    matches!(*guard, Some(_))
}
}
impl Default for Vfs {
@ -197,6 +292,8 @@ impl Default for Vfs {
by_uri: DashMap::new(),
files: DashMap::new(),
head: AtomicU64::new(0),
watcher: std::sync::Mutex::new(None),
by_path: DashMap::new(),
}
}
}

View file

@ -0,0 +1,330 @@
//! File system watching for VFS synchronization.
//!
//! This module provides file system watching capabilities to detect external changes
//! and synchronize them with the VFS. It uses cross-platform file watching with
//! debouncing to handle rapid changes efficiently.
use anyhow::{anyhow, Result};
use camino::Utf8PathBuf;
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use std::{
collections::HashMap,
sync::mpsc,
thread,
time::{Duration, Instant},
};
/// Event types that can occur in the file system.
///
/// [`WatchEvent`] represents the different types of file system changes that
/// the watcher can detect and process.
// `Eq` is derived alongside `PartialEq` because every payload (`Utf8PathBuf`)
// is itself `Eq`; deriving only `PartialEq` would needlessly weaken the type
// (clippy: derive_partial_eq_without_eq).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum WatchEvent {
    /// A file was modified (content changed)
    Modified(Utf8PathBuf),
    /// A new file was created
    Created(Utf8PathBuf),
    /// A file was deleted
    Deleted(Utf8PathBuf),
    /// A file was renamed from one path to another
    Renamed {
        /// Path the file had before the rename
        from: Utf8PathBuf,
        /// Path the file has after the rename
        to: Utf8PathBuf,
    },
}
/// Configuration for the file watcher.
///
/// [`WatchConfig`] controls how the file watcher operates, including what
/// directories to watch and how to filter events.
#[derive(Clone, Debug)]
pub struct WatchConfig {
    /// Whether file watching is enabled; `VfsWatcher::new` refuses a config
    /// with this set to `false`
    pub enabled: bool,
    /// Root directories to watch recursively
    pub roots: Vec<Utf8PathBuf>,
    /// Debounce time in milliseconds (collect events for this duration before processing)
    pub debounce_ms: u64,
    /// File patterns to include (e.g., ["*.py", "*.html"]). Patterns starting
    /// with `*.` are matched against the end of the path string; others are
    /// matched as substrings. An empty list includes everything not excluded.
    pub include_patterns: Vec<String>,
    /// File patterns to exclude (e.g., ["__pycache__", ".git", "*.pyc"]).
    /// Matched as plain substrings and checked before include patterns,
    /// so exclusion always wins.
    pub exclude_patterns: Vec<String>,
}
impl Default for WatchConfig {
    /// Defaults: watching enabled, 250 ms debounce, Python/HTML sources
    /// included, common VCS/build/virtualenv directories excluded, no roots.
    fn default() -> Self {
        let include_patterns = ["*.py", "*.html"]
            .iter()
            .map(|p| (*p).to_string())
            .collect();
        let exclude_patterns = [
            "__pycache__",
            ".git",
            ".pyc",
            "node_modules",
            ".venv",
            "venv",
        ]
        .iter()
        .map(|p| (*p).to_string())
        .collect();

        Self {
            enabled: true,
            roots: Vec::new(),
            debounce_ms: 250,
            include_patterns,
            exclude_patterns,
        }
    }
}
/// File system watcher for VFS synchronization.
///
/// [`VfsWatcher`] monitors the file system for changes and provides a channel
/// for consuming batched events. It handles debouncing and filtering internally.
pub struct VfsWatcher {
    /// The underlying file system watcher; kept alive so notifications keep
    /// flowing — presumably dropping it stops the stream (TODO confirm against
    /// notify's documented drop semantics)
    _watcher: RecommendedWatcher,
    /// Receiver for processed watch events, batched by the background thread
    rx: mpsc::Receiver<Vec<WatchEvent>>,
    /// Configuration for the watcher
    config: WatchConfig,
    /// Handle to the background processing thread; never joined — the thread
    /// exits on its own when either channel disconnects
    _handle: thread::JoinHandle<()>,
}
impl VfsWatcher {
/// Create a new file watcher with the given configuration.
///
/// This starts watching the specified root directories and begins processing
/// events in a background thread.
pub fn new(config: WatchConfig) -> Result<Self> {
if !config.enabled {
return Err(anyhow!("File watching is disabled"));
}
let (event_tx, event_rx) = mpsc::channel();
let (watch_tx, watch_rx) = mpsc::channel();
// Create the file system watcher
let mut watcher = RecommendedWatcher::new(
move |res: notify::Result<Event>| {
if let Ok(event) = res {
let _ = event_tx.send(event);
}
},
Config::default(),
)?;
// Watch all root directories
for root in &config.roots {
let std_path = root.as_std_path();
if std_path.exists() {
watcher.watch(std_path, RecursiveMode::Recursive)?;
}
}
// Spawn background thread for event processing
let config_clone = config.clone();
let handle = thread::spawn(move || {
Self::process_events(event_rx, watch_tx, config_clone);
});
Ok(Self {
_watcher: watcher,
rx: watch_rx,
config,
_handle: handle,
})
}
/// Get the next batch of processed watch events.
///
/// This is a non-blocking operation that returns immediately. If no events
/// are available, it returns an empty vector.
pub fn try_recv_events(&self) -> Vec<WatchEvent> {
match self.rx.try_recv() {
Ok(events) => events,
Err(_) => Vec::new(),
}
}
/// Background thread function for processing raw file system events.
///
/// This function handles debouncing, filtering, and batching of events before
/// sending them to the main thread for VFS synchronization.
fn process_events(
event_rx: mpsc::Receiver<Event>,
watch_tx: mpsc::Sender<Vec<WatchEvent>>,
config: WatchConfig,
) {
let mut pending_events: HashMap<Utf8PathBuf, WatchEvent> = HashMap::new();
let mut last_batch_time = Instant::now();
let debounce_duration = Duration::from_millis(config.debounce_ms);
loop {
// Try to receive events with a timeout for batching
match event_rx.recv_timeout(Duration::from_millis(50)) {
Ok(event) => {
// Process the raw notify event into our WatchEvent format
if let Some(watch_events) = Self::convert_notify_event(event, &config) {
for watch_event in watch_events {
if let Some(path) = Self::get_event_path(&watch_event) {
// Only keep the latest event for each path
pending_events.insert(path.clone(), watch_event);
}
}
}
}
Err(mpsc::RecvTimeoutError::Timeout) => {
// Timeout - check if we should flush pending events
}
Err(mpsc::RecvTimeoutError::Disconnected) => {
// Channel disconnected, exit the thread
break;
}
}
// Check if we should flush pending events
if !pending_events.is_empty()
&& last_batch_time.elapsed() >= debounce_duration
{
let events: Vec<WatchEvent> = pending_events.values().cloned().collect();
if let Err(_) = watch_tx.send(events) {
// Main thread disconnected, exit
break;
}
pending_events.clear();
last_batch_time = Instant::now();
}
}
}
/// Convert a notify Event into our WatchEvent format.
fn convert_notify_event(event: Event, config: &WatchConfig) -> Option<Vec<WatchEvent>> {
let mut watch_events = Vec::new();
for path in event.paths {
if let Ok(utf8_path) = Utf8PathBuf::try_from(path) {
if Self::should_include_path_static(&utf8_path, config) {
match event.kind {
EventKind::Create(_) => watch_events.push(WatchEvent::Created(utf8_path)),
EventKind::Modify(_) => watch_events.push(WatchEvent::Modified(utf8_path)),
EventKind::Remove(_) => watch_events.push(WatchEvent::Deleted(utf8_path)),
_ => {} // Ignore other event types for now
}
}
}
}
if watch_events.is_empty() {
None
} else {
Some(watch_events)
}
}
/// Static version of should_include_path for use in convert_notify_event.
fn should_include_path_static(path: &Utf8PathBuf, config: &WatchConfig) -> bool {
let path_str = path.as_str();
// Check exclude patterns first
for pattern in &config.exclude_patterns {
if path_str.contains(pattern) {
return false;
}
}
// If no include patterns, include everything (that's not excluded)
if config.include_patterns.is_empty() {
return true;
}
// Check include patterns
for pattern in &config.include_patterns {
if pattern.starts_with("*.") {
let extension = &pattern[2..];
if path_str.ends_with(extension) {
return true;
}
} else if path_str.contains(pattern) {
return true;
}
}
false
}
/// Extract the path from a WatchEvent.
fn get_event_path(event: &WatchEvent) -> Option<&Utf8PathBuf> {
match event {
WatchEvent::Modified(path) => Some(path),
WatchEvent::Created(path) => Some(path),
WatchEvent::Deleted(path) => Some(path),
WatchEvent::Renamed { to, .. } => Some(to),
}
}
}
impl Drop for VfsWatcher {
    fn drop(&mut self) {
        // Intentionally empty. Dropping `_watcher` should tear down the notify
        // callback and with it the raw-event sender (TODO confirm against
        // notify's drop semantics); `process_events` then sees
        // `RecvTimeoutError::Disconnected` and breaks out of its loop.
        // `_handle` is never joined, so drop cannot block the caller.
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_watch_config_default() {
        let config = WatchConfig::default();

        // Defaults: enabled, 250 ms debounce, Python included, .git excluded.
        assert!(config.enabled);
        assert_eq!(config.debounce_ms, 250);
        assert!(config.include_patterns.iter().any(|p| p == "*.py"));
        assert!(config.exclude_patterns.iter().any(|p| p == ".git"));
    }

    #[test]
    fn test_should_include_path() {
        let config = WatchConfig::default();
        let included =
            |p: &str| VfsWatcher::should_include_path_static(&Utf8PathBuf::from(p), &config);

        // Python and HTML sources pass the include filter.
        assert!(included("test.py"));
        assert!(included("template.html"));

        // Paths under excluded directories are rejected.
        assert!(!included(".git/config"));
        assert!(!included("__pycache__/test.pyc"));
    }

    #[test]
    fn test_watch_event_types() {
        let path1 = Utf8PathBuf::from("test.py");
        let path2 = Utf8PathBuf::from("new.py");

        // Build one event of each variant over the same base path.
        let events = [
            WatchEvent::Modified(path1.clone()),
            WatchEvent::Created(path1.clone()),
            WatchEvent::Deleted(path1.clone()),
            WatchEvent::Renamed {
                from: path1,
                to: path2,
            },
        ];

        // Adjacent variants must compare unequal.
        for pair in events.windows(2) {
            assert_ne!(pair[0], pair[1]);
        }
    }
}