use Roots in watcher

Bernardo 2019-01-25 18:39:35 +01:00 committed by Aleksey Kladov
parent 86fadbd4e5
commit d63e1cebff
4 changed files with 275 additions and 331 deletions

View file

@@ -1,95 +1,72 @@
-use std::{
-    fmt, fs,
-    path::{Path, PathBuf},
-    sync::Arc,
-    thread,
-};
+use std::{fs, sync::Arc, thread};

 use crossbeam_channel::{Receiver, Sender};
-use parking_lot::Mutex;
 use relative_path::RelativePathBuf;
 use thread_worker::WorkerHandle;
 use walkdir::WalkDir;

 mod watcher;
 use watcher::Watcher;
-pub use watcher::WatcherChange;

-use crate::{RootFilter, VfsRoot};
+use crate::{RootFilter, Roots, VfsRoot};

 pub(crate) enum Task {
     AddRoot {
         root: VfsRoot,
-        path: PathBuf,
-        root_filter: Arc<RootFilter>,
-        nested_roots: Vec<PathBuf>,
-    },
-    /// this variant should only be created by the watcher
-    HandleChange(WatcherChange),
-    LoadChange(WatcherChange),
-    Watch {
-        dir: PathBuf,
-        root_filter: Arc<RootFilter>,
+        filter: Arc<RootFilter>,
     },
 }

 #[derive(Debug)]
-pub struct AddRootResult {
-    pub(crate) root: VfsRoot,
-    pub(crate) files: Vec<(RelativePathBuf, String)>,
-}
-
-#[derive(Debug)]
-pub enum WatcherChangeData {
-    Create { path: PathBuf, text: String },
-    Write { path: PathBuf, text: String },
-    Remove { path: PathBuf },
-}
-
 pub enum TaskResult {
-    AddRoot(AddRootResult),
-    HandleChange(WatcherChange),
-    LoadChange(WatcherChangeData),
-}
-
-impl fmt::Debug for TaskResult {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self {
-            TaskResult::AddRoot(..) => f.write_str("TaskResult::AddRoot(..)"),
-            TaskResult::HandleChange(c) => write!(f, "TaskResult::HandleChange({:?})", c),
-            TaskResult::LoadChange(c) => write!(f, "TaskResult::LoadChange({:?})", c),
-        }
-    }
+    BulkLoadRoot {
+        root: VfsRoot,
+        files: Vec<(RelativePathBuf, String)>,
+    },
+    AddSingleFile {
+        root: VfsRoot,
+        path: RelativePathBuf,
+        text: String,
+    },
+    ChangeSingleFile {
+        root: VfsRoot,
+        path: RelativePathBuf,
+        text: String,
+    },
+    RemoveSingleFile {
+        root: VfsRoot,
+        path: RelativePathBuf,
+    },
 }

 pub(crate) struct Worker {
     worker: thread_worker::Worker<Task, TaskResult>,
     worker_handle: WorkerHandle,
-    watcher: Arc<Mutex<Option<Watcher>>>,
 }

 impl Worker {
-    pub(crate) fn start() -> Worker {
-        let watcher = Arc::new(Mutex::new(None));
-        let watcher_clone = watcher.clone();
+    pub(crate) fn start(roots: Arc<Roots>) -> Worker {
         let (worker, worker_handle) =
             thread_worker::spawn("vfs", 128, move |input_receiver, output_sender| {
-                input_receiver
+                let mut watcher = match Watcher::start(roots, output_sender.clone()) {
+                    Ok(w) => Some(w),
+                    Err(e) => {
+                        log::error!("could not start watcher: {}", e);
+                        None
+                    }
+                };
+                let res = input_receiver
                     .into_iter()
-                    .filter_map(|t| handle_task(t, &watcher_clone))
-                    .try_for_each(|it| output_sender.send(it))
-                    .unwrap()
+                    .filter_map(|t| handle_task(t, &mut watcher))
+                    .try_for_each(|it| output_sender.send(it));
+                if let Some(watcher) = watcher {
+                    let _ = watcher.shutdown();
+                }
+                res.unwrap()
             });
-        match Watcher::start(worker.inp.clone()) {
-            Ok(w) => {
-                watcher.lock().replace(w);
-            }
-            Err(e) => log::error!("could not start watcher: {}", e),
-        };
         Worker {
             worker,
             worker_handle,
-            watcher,
         }
     }
@@ -102,72 +79,31 @@ impl Worker {
     }

     pub(crate) fn shutdown(self) -> thread::Result<()> {
-        if let Some(watcher) = self.watcher.lock().take() {
-            let _ = watcher.shutdown();
-        }
         let _ = self.worker.shutdown();
         self.worker_handle.shutdown()
     }
 }

-fn watch(
-    watcher: &Arc<Mutex<Option<Watcher>>>,
-    dir: &Path,
-    filter_entry: &RootFilter,
-    emit_for_existing: bool,
-) {
-    if let Some(watcher) = watcher.lock().as_mut() {
-        watcher.watch_recursive(dir, filter_entry, emit_for_existing)
-    }
-}
-
-fn handle_task(task: Task, watcher: &Arc<Mutex<Option<Watcher>>>) -> Option<TaskResult> {
+fn handle_task(task: Task, watcher: &mut Option<Watcher>) -> Option<TaskResult> {
     match task {
-        Task::AddRoot {
-            root,
-            path,
-            root_filter,
-            nested_roots,
-        } => {
-            watch(watcher, &path, root_filter.as_ref(), false);
-            log::debug!("loading {} ...", path.as_path().display());
-            let files = load_root(
-                path.as_path(),
-                root_filter.as_ref(),
-                nested_roots.as_slice(),
-            );
-            log::debug!("... loaded {}", path.as_path().display());
-            Some(TaskResult::AddRoot(AddRootResult { root, files }))
-        }
-        Task::HandleChange(change) => {
-            // forward as is because Vfs has to decide if we should load it
-            Some(TaskResult::HandleChange(change))
-        }
-        Task::LoadChange(change) => {
-            log::debug!("loading {:?} ...", change);
-            load_change(change).map(TaskResult::LoadChange)
-        }
-        Task::Watch { dir, root_filter } => {
-            watch(watcher, &dir, root_filter.as_ref(), true);
-            None
+        Task::AddRoot { root, filter } => {
+            if let Some(watcher) = watcher {
+                watcher.watch_root(&filter)
+            }
+            log::debug!("loading {} ...", filter.root.as_path().display());
+            let files = load_root(filter.as_ref());
+            log::debug!("... loaded {}", filter.root.as_path().display());
+            Some(TaskResult::BulkLoadRoot { root, files })
         }
     }
 }

-fn load_root(
-    root: &Path,
-    root_filter: &RootFilter,
-    nested_roots: &[PathBuf],
-) -> Vec<(RelativePathBuf, String)> {
+fn load_root(filter: &RootFilter) -> Vec<(RelativePathBuf, String)> {
     let mut res = Vec::new();
-    for entry in WalkDir::new(root).into_iter().filter_entry(|entry| {
-        if entry.file_type().is_dir() && nested_roots.iter().any(|it| it == entry.path()) {
-            // do not load files of a nested root
-            false
-        } else {
-            root_filter.can_contain(entry.path()).is_some()
-        }
-    }) {
+    for entry in WalkDir::new(&filter.root)
+        .into_iter()
+        .filter_entry(filter.entry_filter())
+    {
         let entry = match entry {
             Ok(entry) => entry,
             Err(e) => {
@@ -186,42 +122,8 @@ fn load_root(
                 continue;
             }
         };
-        let path = RelativePathBuf::from_path(path.strip_prefix(root).unwrap()).unwrap();
+        let path = RelativePathBuf::from_path(path.strip_prefix(&filter.root).unwrap()).unwrap();
         res.push((path.to_owned(), text))
     }
     res
 }
-
-fn load_change(change: WatcherChange) -> Option<WatcherChangeData> {
-    let data = match change {
-        WatcherChange::Create(path) => {
-            if path.is_dir() {
-                return None;
-            }
-            let text = match fs::read_to_string(&path) {
-                Ok(text) => text,
-                Err(e) => {
-                    log::warn!("watcher error \"{}\": {}", path.display(), e);
-                    return None;
-                }
-            };
-            WatcherChangeData::Create { path, text }
-        }
-        WatcherChange::Write(path) => {
-            let text = match fs::read_to_string(&path) {
-                Ok(text) => text,
-                Err(e) => {
-                    log::warn!("watcher error \"{}\": {}", path.display(), e);
-                    return None;
-                }
-            };
-            WatcherChangeData::Write { path, text }
-        }
-        WatcherChange::Remove(path) => WatcherChangeData::Remove { path },
-        WatcherChange::Rescan => {
-            // this should be handled by Vfs::handle_task
-            return None;
-        }
-    };
-    Some(data)
-}
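
A minimal sketch of the one-task-in, results-out shape that the new Worker uses, written with plain std threads and channels. The Task/TaskResult variants and the "scan" below are simplified stand-ins, not the crate's actual types; the watcher is omitted:

    use std::{sync::mpsc, thread};

    // Simplified stand-ins for the Task / TaskResult enums above.
    enum Task {
        AddRoot { root: u32 },
    }

    #[derive(Debug)]
    enum TaskResult {
        BulkLoadRoot { root: u32, files: Vec<String> },
    }

    fn main() {
        let (task_tx, task_rx) = mpsc::channel::<Task>();
        let (res_tx, res_rx) = mpsc::channel::<TaskResult>();

        // The worker answers every Task with zero or more TaskResults,
        // like handle_task in the diff.
        let worker = thread::spawn(move || {
            for task in task_rx {
                match task {
                    Task::AddRoot { root } => {
                        let files = vec!["lib.rs".to_string()]; // pretend directory scan
                        res_tx.send(TaskResult::BulkLoadRoot { root, files }).unwrap();
                    }
                }
            }
            // res_tx is dropped here, which closes the result channel.
        });

        task_tx.send(Task::AddRoot { root: 0 }).unwrap();
        drop(task_tx); // no more tasks: lets the worker loop finish
        for res in res_rx {
            println!("{:?}", res);
        }
        worker.join().unwrap();
    }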

View file

@@ -1,118 +1,72 @@
-use crate::{io, RootFilter};
+use crate::{io, RootFilter, Roots, VfsRoot};
 use crossbeam_channel::Sender;
 use drop_bomb::DropBomb;
 use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher as NotifyWatcher};
+use parking_lot::Mutex;
 use std::{
+    fs,
     path::{Path, PathBuf},
-    sync::mpsc,
+    sync::{mpsc, Arc},
     thread,
     time::Duration,
 };
 use walkdir::WalkDir;

 #[derive(Debug)]
-pub enum WatcherChange {
-    Create(PathBuf),
-    Write(PathBuf),
-    Remove(PathBuf),
-    Rescan,
-}
-
-fn handle_change_event(
-    ev: DebouncedEvent,
-    sender: &Sender<io::Task>,
-) -> Result<(), Box<std::error::Error>> {
-    match ev {
-        DebouncedEvent::NoticeWrite(_)
-        | DebouncedEvent::NoticeRemove(_)
-        | DebouncedEvent::Chmod(_) => {
-            // ignore
-        }
-        DebouncedEvent::Rescan => {
-            sender.send(io::Task::HandleChange(WatcherChange::Rescan))?;
-        }
-        DebouncedEvent::Create(path) => {
-            sender.send(io::Task::HandleChange(WatcherChange::Create(path)))?;
-        }
-        DebouncedEvent::Write(path) => {
-            sender.send(io::Task::HandleChange(WatcherChange::Write(path)))?;
-        }
-        DebouncedEvent::Remove(path) => {
-            sender.send(io::Task::HandleChange(WatcherChange::Remove(path)))?;
-        }
-        DebouncedEvent::Rename(src, dst) => {
-            sender.send(io::Task::HandleChange(WatcherChange::Remove(src)))?;
-            sender.send(io::Task::HandleChange(WatcherChange::Create(dst)))?;
-        }
-        DebouncedEvent::Error(err, path) => {
-            // TODO should we reload the file contents?
-            log::warn!("watcher error \"{}\", {:?}", err, path);
-        }
-    }
-    Ok(())
+enum ChangeKind {
+    Create,
+    Write,
+    Remove,
 }

 const WATCHER_DELAY: Duration = Duration::from_millis(250);

 pub(crate) struct Watcher {
-    watcher: RecommendedWatcher,
     thread: thread::JoinHandle<()>,
     bomb: DropBomb,
-    sender: Sender<io::Task>,
+    watcher: Arc<Mutex<Option<RecommendedWatcher>>>,
 }

 impl Watcher {
     pub(crate) fn start(
-        output_sender: Sender<io::Task>,
+        roots: Arc<Roots>,
+        output_sender: Sender<io::TaskResult>,
     ) -> Result<Watcher, Box<std::error::Error>> {
         let (input_sender, input_receiver) = mpsc::channel();
-        let watcher = notify::watcher(input_sender, WATCHER_DELAY)?;
+        let watcher = Arc::new(Mutex::new(Some(notify::watcher(
+            input_sender,
+            WATCHER_DELAY,
+        )?)));
         let sender = output_sender.clone();
+        let watcher_clone = watcher.clone();
         let thread = thread::spawn(move || {
+            let worker = WatcherWorker {
+                roots,
+                watcher: watcher_clone,
+                sender,
+            };
             input_receiver
                 .into_iter()
                 // forward relevant events only
-                .try_for_each(|change| handle_change_event(change, &output_sender))
+                .try_for_each(|change| worker.handle_debounced_event(change))
                 .unwrap()
         });
         Ok(Watcher {
-            watcher,
             thread,
-            sender,
+            watcher,
             bomb: DropBomb::new(format!("Watcher was not shutdown")),
         })
     }

-    pub fn watch_recursive(&mut self, dir: &Path, filter: &RootFilter, emit_for_contents: bool) {
-        for res in WalkDir::new(dir)
+    pub fn watch_root(&mut self, filter: &RootFilter) {
+        for res in WalkDir::new(&filter.root)
             .into_iter()
-            .filter_entry(|entry| filter.can_contain(entry.path()).is_some())
+            .filter_entry(filter.entry_filter())
         {
             match res {
                 Ok(entry) => {
                     if entry.path().is_dir() {
-                        match self
-                            .watcher
-                            .watch(entry.path(), RecursiveMode::NonRecursive)
-                        {
-                            Ok(()) => log::debug!("watching \"{}\"", entry.path().display()),
-                            Err(e) => {
-                                log::warn!("could not watch \"{}\": {}", entry.path().display(), e)
-                            }
-                        }
-                    } else {
-                        if emit_for_contents && entry.depth() > 0 {
-                            // emit only for files otherwise we will cause watch_recursive to be called again with a dir that we are already watching
-                            // emit as create because we haven't seen it yet
-                            if let Err(e) =
-                                self.sender
-                                    .send(io::Task::HandleChange(WatcherChange::Create(
-                                        entry.path().to_path_buf(),
-                                    )))
-                            {
-                                log::warn!("watcher error: {}", e)
-                            }
-                        }
+                        watch_one(self.watcher.as_ref(), entry.path());
                     }
                 }
                 Err(e) => log::warn!("watcher error: {}", e),
@@ -122,7 +76,7 @@ impl Watcher {

     pub fn shutdown(mut self) -> thread::Result<()> {
         self.bomb.defuse();
-        drop(self.watcher);
+        drop(self.watcher.lock().take());
         let res = self.thread.join();
         match &res {
             Ok(()) => log::info!("... Watcher terminated with ok"),
@@ -131,3 +85,116 @@ impl Watcher {
         res
     }
 }
+
+struct WatcherWorker {
+    watcher: Arc<Mutex<Option<RecommendedWatcher>>>,
+    roots: Arc<Roots>,
+    sender: Sender<io::TaskResult>,
+}
+
+impl WatcherWorker {
+    fn handle_debounced_event(&self, ev: DebouncedEvent) -> Result<(), Box<std::error::Error>> {
+        match ev {
+            DebouncedEvent::NoticeWrite(_)
+            | DebouncedEvent::NoticeRemove(_)
+            | DebouncedEvent::Chmod(_) => {
+                // ignore
+            }
+            DebouncedEvent::Rescan => {
+                // TODO rescan all roots
+            }
+            DebouncedEvent::Create(path) => {
+                self.handle_change(path, ChangeKind::Create);
+            }
+            DebouncedEvent::Write(path) => {
+                self.handle_change(path, ChangeKind::Write);
+            }
+            DebouncedEvent::Remove(path) => {
+                self.handle_change(path, ChangeKind::Remove);
+            }
+            DebouncedEvent::Rename(src, dst) => {
+                self.handle_change(src, ChangeKind::Remove);
+                self.handle_change(dst, ChangeKind::Create);
+            }
+            DebouncedEvent::Error(err, path) => {
+                // TODO should we reload the file contents?
+                log::warn!("watcher error \"{}\", {:?}", err, path);
+            }
+        }
+        Ok(())
+    }
+
+    fn handle_change(&self, path: PathBuf, kind: ChangeKind) {
+        if let Err(e) = self.try_handle_change(path, kind) {
+            log::warn!("watcher error: {}", e)
+        }
+    }
+
+    fn try_handle_change(
+        &self,
+        path: PathBuf,
+        kind: ChangeKind,
+    ) -> Result<(), Box<std::error::Error>> {
+        let (root, rel_path) = match self.roots.find(&path) {
+            Some(x) => x,
+            None => return Ok(()),
+        };
+        match kind {
+            ChangeKind::Create => {
+                if path.is_dir() {
+                    self.watch_recursive(&path, root);
+                } else {
+                    let text = fs::read_to_string(&path)?;
+                    self.sender.send(io::TaskResult::AddSingleFile {
+                        root,
+                        path: rel_path,
+                        text,
+                    })?
+                }
+            }
+            ChangeKind::Write => {
+                let text = fs::read_to_string(&path)?;
+                self.sender.send(io::TaskResult::ChangeSingleFile {
+                    root,
+                    path: rel_path,
+                    text,
+                })?
+            }
+            ChangeKind::Remove => self.sender.send(io::TaskResult::RemoveSingleFile {
+                root,
+                path: rel_path,
+            })?,
+        }
+        Ok(())
+    }
+
+    fn watch_recursive(&self, dir: &Path, root: VfsRoot) {
+        let filter = &self.roots[root];
+        for res in WalkDir::new(dir)
+            .into_iter()
+            .filter_entry(|entry| filter.can_contain(entry.path()).is_some())
+        {
+            match res {
+                Ok(entry) => {
+                    if entry.path().is_dir() {
+                        watch_one(self.watcher.as_ref(), entry.path());
+                    } else {
+                        // emit only for files otherwise we will cause watch_recursive to be called again with a dir that we are already watching
+                        // emit as create because we haven't seen it yet
+                        self.handle_change(entry.path().to_path_buf(), ChangeKind::Create);
+                    }
+                }
+                Err(e) => log::warn!("watcher error: {}", e),
+            }
+        }
+    }
+}
+
+fn watch_one(watcher: &Mutex<Option<RecommendedWatcher>>, dir: &Path) {
+    if let Some(watcher) = watcher.lock().as_mut() {
+        match watcher.watch(dir, RecursiveMode::NonRecursive) {
+            Ok(()) => log::debug!("watching \"{}\"", dir.display()),
+            Err(e) => log::warn!("could not watch \"{}\": {}", dir.display(), e),
+        }
+    }
+}
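
A reduced model of the dispatch that try_handle_change performs: classify the change, drop paths that belong to no root, and read file contents only for creates and writes. The names Message and handle_change are illustrative, a single-root prefix check stands in for Roots::find, and the directory case is skipped rather than re-watched:

    use std::{
        fs,
        path::{Path, PathBuf},
    };

    enum ChangeKind {
        Create,
        Write,
        Remove,
    }

    #[derive(Debug)]
    enum Message {
        AddSingleFile { path: PathBuf, text: String },
        ChangeSingleFile { path: PathBuf, text: String },
        RemoveSingleFile { path: PathBuf },
    }

    fn handle_change(root: &Path, path: PathBuf, kind: ChangeKind) -> std::io::Result<Option<Message>> {
        if !path.starts_with(root) {
            // Outside every root: ignore, like the early return on roots.find(&path).
            return Ok(None);
        }
        let msg = match kind {
            // The real code starts watching a newly created directory; skipped here.
            ChangeKind::Create | ChangeKind::Write if path.is_dir() => return Ok(None),
            ChangeKind::Create => Message::AddSingleFile { text: fs::read_to_string(&path)?, path },
            ChangeKind::Write => Message::ChangeSingleFile { text: fs::read_to_string(&path)?, path },
            ChangeKind::Remove => Message::RemoveSingleFile { path },
        };
        Ok(Some(msg))
    }

    fn main() {
        let root = Path::new("/tmp/project");
        match handle_change(root, root.join("src/lib.rs"), ChangeKind::Write) {
            Ok(msg) => println!("{:?}", msg),
            Err(e) => println!("io error: {}", e),
        }
    }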

View file

@@ -28,22 +28,25 @@ use crossbeam_channel::Receiver;
 use ra_arena::{impl_arena_id, Arena, RawId};
 use relative_path::{Component, RelativePath, RelativePathBuf};
 use rustc_hash::{FxHashMap, FxHashSet};
+use walkdir::DirEntry;

 pub use crate::io::TaskResult as VfsTask;
-use io::{Task, TaskResult, WatcherChange, WatcherChangeData, Worker};
+use io::{TaskResult, Worker};

 /// `RootFilter` is a predicate that checks if a file can belong to a root. If
 /// several filters match a file (nested dirs), the most nested one wins.
 pub(crate) struct RootFilter {
     root: PathBuf,
     filter: fn(&Path, &RelativePath) -> bool,
+    excluded_dirs: Vec<PathBuf>,
 }

 impl RootFilter {
-    fn new(root: PathBuf) -> RootFilter {
+    fn new(root: PathBuf, excluded_dirs: Vec<PathBuf>) -> RootFilter {
         RootFilter {
             root,
             filter: default_filter,
+            excluded_dirs,
         }
     }
     /// Check if this root can contain `path`. NB: even if this returns
@@ -56,6 +59,17 @@ impl RootFilter {
         }
         Some(rel_path)
     }
+
+    pub(crate) fn entry_filter<'a>(&'a self) -> impl FnMut(&DirEntry) -> bool + 'a {
+        move |entry: &DirEntry| {
+            if entry.path().is_dir() && self.excluded_dirs.iter().any(|it| it == entry.path()) {
+                // do not walk nested roots
+                false
+            } else {
+                self.can_contain(entry.path()).is_some()
+            }
+        }
+    }
 }

 pub(crate) fn default_filter(path: &Path, rel_path: &RelativePath) -> bool {
@@ -94,10 +108,22 @@ pub(crate) struct Roots {
 }

 impl Roots {
-    pub(crate) fn new() -> Roots {
-        Roots {
-            roots: Arena::default(),
+    pub(crate) fn new(mut paths: Vec<PathBuf>) -> Roots {
+        let mut roots = Arena::default();
+        // A hack to make nesting work.
+        paths.sort_by_key(|it| Reverse(it.as_os_str().len()));
+        for (i, path) in paths.iter().enumerate() {
+            let nested_roots = paths[..i]
+                .iter()
+                .filter(|it| it.starts_with(path))
+                .map(|it| it.clone())
+                .collect::<Vec<_>>();
+            let root_filter = Arc::new(RootFilter::new(path.clone(), nested_roots));
+            roots.alloc(root_filter.clone());
         }
+        Roots { roots }
     }
     pub(crate) fn find(&self, path: &Path) -> Option<(VfsRoot, RelativePathBuf)> {
         self.roots
@@ -135,36 +161,22 @@ impl fmt::Debug for Vfs {
 impl Vfs {
     pub fn new(roots: Vec<PathBuf>) -> (Vfs, Vec<VfsRoot>) {
-        let mut root_paths = roots;
-        let worker = io::Worker::start();
-        let mut roots = Roots::new();
+        let roots = Arc::new(Roots::new(roots));
+        let worker = io::Worker::start(roots.clone());
         let mut root2files = FxHashMap::default();

-        // A hack to make nesting work.
-        root_paths.sort_by_key(|it| Reverse(it.as_os_str().len()));
-        for (i, path) in root_paths.iter().enumerate() {
-            let root_filter = Arc::new(RootFilter::new(path.clone()));
-            let root = roots.alloc(root_filter.clone());
+        for (root, filter) in roots.iter() {
             root2files.insert(root, Default::default());
-
-            let nested_roots = root_paths[..i]
-                .iter()
-                .filter(|it| it.starts_with(path))
-                .map(|it| it.clone())
-                .collect::<Vec<_>>();
-
-            let task = io::Task::AddRoot {
-                root,
-                path: path.clone(),
-                root_filter,
-                nested_roots,
-            };
-            worker.sender().send(task).unwrap();
+            worker
+                .sender()
+                .send(io::Task::AddRoot {
+                    root,
+                    filter: filter.clone(),
+                })
+                .unwrap();
         }
         let res = Vfs {
-            roots: Arc::new(roots),
+            roots,
             files: Arena::default(),
             root2files,
             worker,
@@ -225,90 +237,46 @@ impl Vfs {
     pub fn handle_task(&mut self, task: io::TaskResult) {
         match task {
-            TaskResult::AddRoot(task) => {
-                let mut files = Vec::new();
+            TaskResult::BulkLoadRoot { root, files } => {
+                let mut cur_files = Vec::new();
                 // While we were scanning the root in the backgound, a file might have
                 // been open in the editor, so we need to account for that.
-                let exising = self.root2files[&task.root]
+                let exising = self.root2files[&root]
                     .iter()
                     .map(|&file| (self.files[file].path.clone(), file))
                     .collect::<FxHashMap<_, _>>();
-                for (path, text) in task.files {
+                for (path, text) in files {
                     if let Some(&file) = exising.get(&path) {
                         let text = Arc::clone(&self.files[file].text);
-                        files.push((file, path, text));
+                        cur_files.push((file, path, text));
                         continue;
                     }
                     let text = Arc::new(text);
-                    let file = self.add_file(task.root, path.clone(), Arc::clone(&text), false);
-                    files.push((file, path, text));
+                    let file = self.add_file(root, path.clone(), Arc::clone(&text), false);
+                    cur_files.push((file, path, text));
                 }
                 let change = VfsChange::AddRoot {
-                    root: task.root,
-                    files,
+                    root,
+                    files: cur_files,
                 };
                 self.pending_changes.push(change);
             }
-            TaskResult::HandleChange(change) => match &change {
-                WatcherChange::Create(path) if path.is_dir() => {
-                    if let Some((root, _path, _file)) = self.find_root(&path) {
-                        let root_filter = self.roots[root].clone();
-                        self.worker
-                            .sender()
-                            .send(Task::Watch {
-                                dir: path.to_path_buf(),
-                                root_filter,
-                            })
-                            .unwrap()
-                    }
-                }
-                WatcherChange::Create(path)
-                | WatcherChange::Remove(path)
-                | WatcherChange::Write(path) => {
-                    if self.should_handle_change(&path) {
-                        self.worker.sender().send(Task::LoadChange(change)).unwrap()
-                    }
-                }
-                WatcherChange::Rescan => {
-                    // TODO we should reload all files
-                }
-            },
-            TaskResult::LoadChange(change) => match change {
-                WatcherChangeData::Create { path, text }
-                | WatcherChangeData::Write { path, text } => {
-                    if let Some((root, path, file)) = self.find_root(&path) {
-                        if let Some(file) = file {
-                            self.do_change_file(file, text, false);
-                        } else {
-                            self.do_add_file(root, path, text, false);
-                        }
-                    }
-                }
-                WatcherChangeData::Remove { path } => {
-                    if let Some((root, path, file)) = self.find_root(&path) {
-                        if let Some(file) = file {
-                            self.do_remove_file(root, path, file, false);
-                        }
-                    }
-                }
-            },
-        }
-    }
-
-    fn should_handle_change(&self, path: &Path) -> bool {
-        if let Some((_root, _rel_path, file)) = self.find_root(&path) {
-            if let Some(file) = file {
-                if self.files[file].is_overlayed {
-                    // file is overlayed
-                    log::debug!("skipping overlayed \"{}\"", path.display());
-                    return false;
-                }
-            }
-            true
-        } else {
-            // file doesn't belong to any root
-            false
+            TaskResult::AddSingleFile { root, path, text } => {
+                self.do_add_file(root, path, text, false);
+            }
+            TaskResult::ChangeSingleFile { root, path, text } => {
+                if let Some(file) = self.find_file(root, &path) {
+                    self.do_change_file(file, text, false);
+                } else {
+                    self.do_add_file(root, path, text, false);
+                }
+            }
+            TaskResult::RemoveSingleFile { root, path } => {
+                if let Some(file) = self.find_file(root, &path) {
+                    self.do_remove_file(root, path, file, false);
+                }
+            }
         }
     }
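
The comment in the BulkLoadRoot arm is the subtle part: files already open in the editor (overlayed) keep their in-memory text instead of being overwritten by whatever the background scan read from disk. A std-only sketch of that merge, with a HashMap standing in for the file arena and hypothetical names:

    use std::collections::HashMap;

    /// Merge a bulk scan of a root with files that are already open in memory.
    /// Already-known paths keep their current text; new paths take the scanned text.
    fn merge_bulk_load(
        existing: &HashMap<String, String>, // path -> text already in the VFS
        scanned: Vec<(String, String)>,     // (path, text) read from disk
    ) -> Vec<(String, String)> {
        let mut out = Vec::new();
        for (path, text) in scanned {
            match existing.get(&path) {
                // The editor's copy wins over what the scanner read from disk.
                Some(current) => out.push((path, current.clone())),
                None => out.push((path, text)),
            }
        }
        out
    }

    fn main() {
        let mut existing = HashMap::new();
        existing.insert("src/lib.rs".to_string(), "edited, unsaved".to_string());

        let scanned = vec![
            ("src/lib.rs".to_string(), "stale on-disk text".to_string()),
            ("src/main.rs".to_string(), "fn main() {}".to_string()),
        ];

        for (path, text) in merge_bulk_load(&existing, scanned) {
            println!("{}: {}", path, text);
        }
    }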
@@ -434,11 +402,15 @@ impl Vfs {
     fn find_root(&self, path: &Path) -> Option<(VfsRoot, RelativePathBuf, Option<VfsFile>)> {
         let (root, path) = self.roots.find(&path)?;
-        let file = self.root2files[&root]
+        let file = self.find_file(root, &path);
+        Some((root, path, file))
+    }
+
+    fn find_file(&self, root: VfsRoot, path: &RelativePath) -> Option<VfsFile> {
+        self.root2files[&root]
             .iter()
             .map(|&it| it)
-            .find(|&file| self.files[file].path == path);
-        Some((root, path, file))
+            .find(|&file| self.files[file].path == path)
     }
 }
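
The "hack to make nesting work" relies purely on ordering: roots are sorted by descending path length, so a nested root such as a/b is allocated before its parent a, a longest-prefix lookup hits the most nested root first, and the parent's excluded_dirs keeps its own walk out of the nested root. A small standalone illustration of the lookup side, with plain PathBufs instead of the arena and find as a simplified stand-in for Roots::find:

    use std::path::{Path, PathBuf};

    /// Return the index and root-relative path of the first (most nested) root
    /// containing `path`. Roots are assumed sorted by descending path length.
    fn find(roots: &[PathBuf], path: &Path) -> Option<(usize, PathBuf)> {
        roots
            .iter()
            .enumerate()
            .find(|(_idx, root)| path.starts_with(root.as_path()))
            .map(|(idx, root)| (idx, path.strip_prefix(root).unwrap().to_path_buf()))
    }

    fn main() {
        let mut roots = vec![PathBuf::from("/work/a"), PathBuf::from("/work/a/b")];
        // Longer (more nested) paths first, mirroring the sort in Roots::new.
        roots.sort_by_key(|it| std::cmp::Reverse(it.as_os_str().len()));

        // "/work/a/b/baz.rs" belongs to the nested root "/work/a/b", not to "/work/a".
        let (idx, rel) = find(&roots, Path::new("/work/a/b/baz.rs")).unwrap();
        assert_eq!(roots[idx], PathBuf::from("/work/a/b"));
        assert_eq!(rel, PathBuf::from("baz.rs"));
        println!("root {} -> {}", roots[idx].display(), rel.display());
    }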

View file

@@ -75,27 +75,31 @@ fn test_vfs_works() -> std::io::Result<()> {
     }

     fs::write(&dir.path().join("a/b/baz.rs"), "quux").unwrap();
-    // 2 tasks per change, HandleChange and then LoadChange
-    process_tasks(&mut vfs, 2);
+    process_tasks(&mut vfs, 1);
     assert_match!(
         vfs.commit_changes().as_slice(),
         [VfsChange::ChangeFile { text, .. }],
         assert_eq!(text.as_str(), "quux")
     );

-    vfs.change_file_overlay(&dir.path().join("a/b/baz.rs"), "m".to_string());
+    vfs.add_file_overlay(&dir.path().join("a/b/baz.rs"), "m".to_string());
     assert_match!(
         vfs.commit_changes().as_slice(),
         [VfsChange::ChangeFile { text, .. }],
         assert_eq!(text.as_str(), "m")
     );

+    // changing file on disk while overlayed doesn't generate a VfsChange
+    fs::write(&dir.path().join("a/b/baz.rs"), "corge").unwrap();
+    process_tasks(&mut vfs, 1);
+    assert_match!(vfs.commit_changes().as_slice(), []);
+
     // removing overlay restores data on disk
     vfs.remove_file_overlay(&dir.path().join("a/b/baz.rs"));
     assert_match!(
         vfs.commit_changes().as_slice(),
         [VfsChange::ChangeFile { text, .. }],
-        assert_eq!(text.as_str(), "quux")
+        assert_eq!(text.as_str(), "corge")
     );

     vfs.add_file_overlay(&dir.path().join("a/b/spam.rs"), "spam".to_string());
@@ -117,7 +121,7 @@ fn test_vfs_works() -> std::io::Result<()> {

     fs::create_dir_all(dir.path().join("a/sub1/sub2")).unwrap();
     fs::write(dir.path().join("a/sub1/sub2/new.rs"), "new hello").unwrap();
-    process_tasks(&mut vfs, 3);
+    process_tasks(&mut vfs, 1);
     assert_match!(
         vfs.commit_changes().as_slice(),
         [VfsChange::AddFile { text, path, .. }],
@@ -132,7 +136,7 @@ fn test_vfs_works() -> std::io::Result<()> {
         &dir.path().join("a/sub1/sub2/new1.rs"),
     )
     .unwrap();
-    process_tasks(&mut vfs, 4);
+    process_tasks(&mut vfs, 2);
     assert_match!(
         vfs.commit_changes().as_slice(),
         [VfsChange::RemoveFile {
@@ -150,17 +154,16 @@ fn test_vfs_works() -> std::io::Result<()> {
     );

     fs::remove_file(&dir.path().join("a/sub1/sub2/new1.rs")).unwrap();
-    process_tasks(&mut vfs, 2);
+    process_tasks(&mut vfs, 1);
     assert_match!(
         vfs.commit_changes().as_slice(),
         [VfsChange::RemoveFile { path, .. }],
         assert_eq!(path, "sub1/sub2/new1.rs")
     );

-    fs::create_dir_all(dir.path().join("a/target")).unwrap();
     // should be ignored
+    fs::create_dir_all(dir.path().join("a/target")).unwrap();
     fs::write(&dir.path().join("a/target/new.rs"), "ignore me").unwrap();
-    process_tasks(&mut vfs, 1); // 1 task because no LoadChange will happen, just HandleChange for dir creation

     assert_match!(
         vfs.task_receiver().try_recv(),