9625: internal: simplify handling of the build scripts r=matklad a=matklad



Co-authored-by: Aleksey Kladov <aleksey.kladov@gmail.com>
bors[bot] 2021-07-19 15:28:36 +00:00, committed by GitHub
commit c595676f7e
19 changed files with 543 additions and 528 deletions
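The shape of the refactor, before the per-file diffs: the freestanding `BuildDataCollector`/`BuildDataResult` machinery is deleted, and build-script output is stored on the workspace itself as a `WorkspaceBuildScripts` value. A minimal sketch of that shape, using only names that appear in the diffs below; the payload, derives, and bodies are placeholders, not the real `project_model` definitions:

```rust
// Sketch only: the real `WorkspaceBuildScripts` carries OUT_DIRs, cfg
// flags, proc-macro dylib paths, and more.
#[derive(Default, Clone, PartialEq, Eq, Debug)]
pub struct WorkspaceBuildScripts {
    outputs: Vec<String>, // placeholder payload
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ProjectWorkspace {
    Cargo {
        // ... cargo metadata fields elided ...
        /// Build-script results live on the workspace itself, not in a
        /// separate `BuildDataResult` keyed by workspace.
        build_scripts: WorkspaceBuildScripts,
    },
    // `Json { .. }` and `DetachedFiles { .. }` elided; they have no
    // build scripts to run.
}

impl ProjectWorkspace {
    pub fn set_build_scripts(&mut self, bs: WorkspaceBuildScripts) {
        match self {
            ProjectWorkspace::Cargo { build_scripts, .. } => *build_scripts = bs,
        }
    }
}
```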

View file

@@ -68,7 +68,6 @@ impl AnalysisStatsCmd {
cargo_config.no_sysroot = self.no_sysroot;
let load_cargo_config = LoadCargoConfig {
load_out_dirs_from_check: self.enable_build_scripts,
wrap_rustc: false,
with_proc_macro: self.enable_proc_macros,
prefill_caches: false,
};

View file

@@ -34,12 +34,8 @@ pub fn diagnostics(
with_proc_macro: bool,
) -> Result<()> {
let cargo_config = Default::default();
let load_cargo_config = LoadCargoConfig {
load_out_dirs_from_check,
with_proc_macro,
wrap_rustc: false,
prefill_caches: false,
};
let load_cargo_config =
LoadCargoConfig { load_out_dirs_from_check, with_proc_macro, prefill_caches: false };
let (host, _vfs, _proc_macro) =
load_workspace_at(path, &cargo_config, &load_cargo_config, &|_| {})?;
let db = host.raw_database();

View file

@@ -8,7 +8,7 @@ use hir::db::DefDatabase;
use ide::{AnalysisHost, Change};
use ide_db::base_db::CrateGraph;
use project_model::{
BuildDataCollector, CargoConfig, ProcMacroClient, ProjectManifest, ProjectWorkspace,
CargoConfig, ProcMacroClient, ProjectManifest, ProjectWorkspace, WorkspaceBuildScripts,
};
use vfs::{loader::Handle, AbsPath, AbsPathBuf};
@@ -16,7 +16,6 @@ use crate::reload::{ProjectFolders, SourceRootConfig};
pub(crate) struct LoadCargoConfig {
pub(crate) load_out_dirs_from_check: bool,
pub(crate) wrap_rustc: bool,
pub(crate) with_proc_macro: bool,
pub(crate) prefill_caches: bool,
}
@@ -28,17 +27,16 @@ pub(crate) fn load_workspace_at(
progress: &dyn Fn(String),
) -> Result<(AnalysisHost, vfs::Vfs, Option<ProcMacroClient>)> {
let root = AbsPathBuf::assert(std::env::current_dir()?.join(root));
eprintln!("root = {:?}", root);
let root = ProjectManifest::discover_single(&root)?;
eprintln!("root = {:?}", root);
let workspace = ProjectWorkspace::load(root, cargo_config, progress)?;
load_workspace(workspace, load_config, progress)
load_workspace(workspace, cargo_config, load_config, progress)
}
fn load_workspace(
ws: ProjectWorkspace,
config: &LoadCargoConfig,
mut ws: ProjectWorkspace,
cargo_config: &CargoConfig,
load_config: &LoadCargoConfig,
progress: &dyn Fn(String),
) -> Result<(AnalysisHost, vfs::Vfs, Option<ProcMacroClient>)> {
let (sender, receiver) = unbounded();
@@ -49,33 +47,27 @@ fn load_workspace(
Box::new(loader)
};
let proc_macro_client = if config.with_proc_macro {
let proc_macro_client = if load_config.with_proc_macro {
let path = AbsPathBuf::assert(std::env::current_exe()?);
Some(ProcMacroClient::extern_process(path, &["proc-macro"]).unwrap())
} else {
None
};
let build_data = if config.load_out_dirs_from_check {
let mut collector = BuildDataCollector::new(config.wrap_rustc);
ws.collect_build_data_configs(&mut collector);
Some(collector.collect(progress)?)
ws.set_build_scripts(if load_config.load_out_dirs_from_check {
ws.run_build_scripts(cargo_config, progress)?
} else {
None
};
WorkspaceBuildScripts::default()
});
let crate_graph = ws.to_crate_graph(
build_data.as_ref(),
proc_macro_client.as_ref(),
&mut |path: &AbsPath| {
let contents = loader.load_sync(path);
let path = vfs::VfsPath::from(path.to_path_buf());
vfs.set_file_contents(path.clone(), contents);
vfs.file_id(&path)
},
);
let crate_graph = ws.to_crate_graph(proc_macro_client.as_ref(), &mut |path: &AbsPath| {
let contents = loader.load_sync(path);
let path = vfs::VfsPath::from(path.to_path_buf());
vfs.set_file_contents(path.clone(), contents);
vfs.file_id(&path)
});
let project_folders = ProjectFolders::new(&[ws], &[], build_data.as_ref());
let project_folders = ProjectFolders::new(&[ws], &[]);
loader.set_config(vfs::loader::Config {
load: project_folders.load,
watch: vec![],
@@ -86,7 +78,7 @@ fn load_workspace(
let host =
load_crate_graph(crate_graph, project_folders.source_root_config, &mut vfs, &receiver);
if config.prefill_caches {
if load_config.prefill_caches {
host.analysis().prime_caches(|_| {})?;
}
Ok((host, vfs, proc_macro_client))
@@ -146,10 +138,9 @@ mod tests {
#[test]
fn test_loading_rust_analyzer() {
let path = Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap().parent().unwrap();
let cargo_config = Default::default();
let cargo_config = CargoConfig::default();
let load_cargo_config = LoadCargoConfig {
load_out_dirs_from_check: false,
wrap_rustc: false,
with_proc_macro: false,
prefill_caches: false,
};
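For callers, the visible change is that `LoadCargoConfig` loses its `wrap_rustc` field and `load_workspace_at` now also takes the `CargoConfig`, so the build-script phase can reuse it. A hedged sketch of a call site, modeled on the `diagnostics.rs` hunk above; the `analyze` wrapper and its `anyhow::Result` signature are invented for illustration:

```rust
use std::path::Path;

fn analyze(path: &Path) -> anyhow::Result<()> {
    let cargo_config = CargoConfig::default();
    let load_cargo_config = LoadCargoConfig {
        load_out_dirs_from_check: true, // phase two: run `cargo check` for OUT_DIRs
        with_proc_macro: false,
        prefill_caches: false,
        // Note: no `wrap_rustc` field any more; that knob moved into
        // `CargoConfig::wrap_rustc_in_build_scripts`.
    };
    let (host, _vfs, _proc_macro) =
        load_workspace_at(path, &cargo_config, &load_cargo_config, &|_| {})?;
    let _db = host.raw_database();
    Ok(())
}
```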

View file

@@ -5,13 +5,13 @@ use crate::cli::{
Result,
};
use ide_ssr::{MatchFinder, SsrPattern, SsrRule};
use project_model::CargoConfig;
pub fn apply_ssr_rules(rules: Vec<SsrRule>) -> Result<()> {
use ide_db::base_db::SourceDatabaseExt;
let cargo_config = Default::default();
let cargo_config = CargoConfig::default();
let load_cargo_config = LoadCargoConfig {
load_out_dirs_from_check: true,
wrap_rustc: false,
with_proc_macro: true,
prefill_caches: false,
};
@@ -39,10 +39,9 @@ pub fn apply_ssr_rules(rules: Vec<SsrRule>) -> Result<()> {
pub fn search_for_patterns(patterns: Vec<SsrPattern>, debug_snippet: Option<String>) -> Result<()> {
use ide_db::base_db::SourceDatabaseExt;
use ide_db::symbol_index::SymbolsDatabase;
let cargo_config = Default::default();
let cargo_config = CargoConfig::default();
let load_cargo_config = LoadCargoConfig {
load_out_dirs_from_check: true,
wrap_rustc: true,
with_proc_macro: true,
prefill_caches: false,
};

View file

@@ -628,9 +628,6 @@ impl Config {
pub fn run_build_scripts(&self) -> bool {
self.data.cargo_runBuildScripts || self.data.procMacro_enable
}
pub fn wrap_rustc(&self) -> bool {
self.data.cargo_useRustcWrapperForBuildScripts
}
pub fn cargo(&self) -> CargoConfig {
let rustc_source = self.data.rustcSource.as_ref().map(|rustc_src| {
if rustc_src == "discover" {
@@ -648,6 +645,7 @@ impl Config {
rustc_source,
no_sysroot: self.data.cargo_noSysroot,
unset_test_crates: self.data.cargo_unsetTest.clone(),
wrap_rustc_in_build_scripts: self.data.cargo_useRustcWrapperForBuildScripts,
}
}
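With this, the wrapper flag travels with the rest of the cargo settings instead of needing a separate `Config::wrap_rustc()` accessor. A small sketch of building such a config by hand, assuming only that `CargoConfig` implements `Default` (as the `Default::default()` call sites elsewhere in this diff suggest):

```rust
fn build_script_config() -> CargoConfig {
    CargoConfig {
        // Previously queried separately via `Config::wrap_rustc()`;
        // now it rides along with every other cargo setting.
        wrap_rustc_in_build_scripts: true,
        ..CargoConfig::default()
    }
}
```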

View file

@@ -12,7 +12,7 @@ use ide_db::base_db::{CrateId, VfsPath};
use lsp_types::{SemanticTokens, Url};
use parking_lot::{Mutex, RwLock};
use project_model::{
BuildDataCollector, BuildDataResult, CargoWorkspace, ProcMacroClient, ProjectWorkspace, Target,
CargoWorkspace, ProcMacroClient, ProjectWorkspace, Target, WorkspaceBuildScripts,
};
use rustc_hash::FxHashMap;
use vfs::AnchoredPathBuf;
@@ -74,17 +74,37 @@ pub(crate) struct GlobalState {
pub(crate) vfs_progress_n_total: usize,
pub(crate) vfs_progress_n_done: usize,
/// For both `workspaces` and `workspace_build_data`, the field stores the
/// data we actually use, while the `OpQueue` stores the result of the last
/// fetch.
/// The `workspaces` field stores the data we actually use, while the
/// `OpQueue` stores the result of the last fetch.
///
/// If the fetch (partially) fails, we do not update the values.
/// If the fetch (partially) fails, we do not update the current value.
///
/// The handling of build data is subtle. We fetch workspace in two phases:
///
/// *First*, we run `cargo metadata`, which gives us fast results for
/// initial analysis.
///
/// *Second*, we run `cargo check` which runs build scripts and compiles
/// proc macros.
///
/// We need both for the precise analysis, but we want rust-analyzer to be
/// at least partially available just after the first phase. That's because
/// the first phase is much faster and much less likely to fail.
///
/// This creates a complication -- by the time the second phase completes,
/// the results of the first phase could be invalid. That is, while we run
/// `cargo check`, the user edits `Cargo.toml`; we notice this, and the new
/// `cargo metadata` completes before `cargo check` does.
///
/// An additional complication is that we want to avoid needless work. When
/// the user just adds comments or whitespace to `Cargo.toml`, we do not want
/// to invalidate any salsa caches.
pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>,
pub(crate) fetch_workspaces_queue: OpQueue<(), Vec<anyhow::Result<ProjectWorkspace>>>,
pub(crate) workspace_build_data: Option<BuildDataResult>,
pub(crate) fetch_workspaces_queue: OpQueue<Vec<anyhow::Result<ProjectWorkspace>>>,
pub(crate) fetch_build_data_queue:
        OpQueue<BuildDataCollector, Option<anyhow::Result<BuildDataResult>>>,
        OpQueue<(Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>)>,
    pub(crate) prime_caches_queue: OpQueue<(), ()>,
    pub(crate) prime_caches_queue: OpQueue<()>,
latest_requests: Arc<RwLock<LatestRequests>>,
}
@@ -146,7 +166,6 @@ impl GlobalState {
workspaces: Arc::new(Vec::new()),
fetch_workspaces_queue: OpQueue::default(),
workspace_build_data: None,
prime_caches_queue: OpQueue::default(),
fetch_build_data_queue: OpQueue::default(),
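The doc comment above is the heart of the change, and the new type of `fetch_build_data_queue` encodes it: the result carries the `Arc<Vec<ProjectWorkspace>>` snapshot the build scripts were run against, so the consumer can tell whether the results still match the active workspaces. A toy model of that pairing, with `String` standing in for `ProjectWorkspace` and `Result<String, String>` for `anyhow::Result<WorkspaceBuildScripts>`:

```rust
use std::sync::Arc;

type Workspaces = Arc<Vec<String>>;

struct FetchBuildDataResult {
    /// Snapshot of the workspaces the build scripts were run against.
    workspaces: Workspaces,
    /// One result per workspace, in the same order.
    build_scripts: Vec<Result<String, String>>,
}

fn results_are_current(res: &FetchBuildDataResult, current: &Workspaces) -> bool {
    // Pointer identity, not deep equality: if the workspaces were re-fetched
    // (say, after a `Cargo.toml` edit) while `cargo check` was still running,
    // the `Arc`s differ and the build-script results are known to be stale.
    Arc::ptr_eq(&res.workspaces, current)
}

fn main() {
    let v1: Workspaces = Arc::new(vec!["ws".to_string()]);
    let res = FetchBuildDataResult {
        workspaces: Arc::clone(&v1),
        build_scripts: vec![Ok("out_dir".to_string())],
    };
    assert_eq!(res.build_scripts.len(), v1.len());
    assert!(results_are_current(&res, &v1));

    let v2: Workspaces = Arc::new(v1.to_vec()); // same contents, fresh fetch
    assert!(!results_are_current(&res, &v2));
}
```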

View file

@@ -17,6 +17,7 @@ use ide_db::helpers::{
insert_use::{ImportGranularity, InsertUseConfig},
SnippetCap,
};
use project_model::CargoConfig;
use test_utils::project_root;
use vfs::{AbsPathBuf, VfsPath};
@@ -32,10 +33,9 @@ fn integrated_highlighting_benchmark() {
let workspace_to_load = project_root();
let file = "./crates/ide_db/src/apply_change.rs";
let cargo_config = Default::default();
let cargo_config = CargoConfig::default();
let load_cargo_config = LoadCargoConfig {
load_out_dirs_from_check: true,
wrap_rustc: false,
with_proc_macro: false,
prefill_caches: false,
};
@@ -87,10 +87,9 @@ fn integrated_completion_benchmark() {
let workspace_to_load = project_root();
let file = "./crates/hir/src/lib.rs";
let cargo_config = Default::default();
let cargo_config = CargoConfig::default();
let load_cargo_config = LoadCargoConfig {
load_out_dirs_from_check: true,
wrap_rustc: false,
with_proc_macro: false,
prefill_caches: true,
};

View file

@@ -12,7 +12,6 @@ use ide::{FileId, PrimeCachesProgress};
use ide_db::base_db::VfsPath;
use lsp_server::{Connection, Notification, Request, Response};
use lsp_types::notification::Notification as _;
use project_model::BuildDataCollector;
use vfs::ChangeKind;
use crate::{
@@ -236,12 +235,7 @@ impl GlobalState {
let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
if self.config.run_build_scripts() && workspaces_updated {
let mut collector =
BuildDataCollector::new(self.config.wrap_rustc());
for ws in self.workspaces.iter() {
ws.collect_build_data_configs(&mut collector);
}
self.fetch_build_data_request(collector)
self.fetch_build_data_request()
}
(Progress::End, None)
@@ -719,23 +713,21 @@ impl GlobalState {
self.maybe_update_diagnostics();
// Ensure that only one cache priming task can run at a time
self.prime_caches_queue.request_op(());
if self.prime_caches_queue.should_start_op().is_none() {
return;
}
self.task_pool.handle.spawn_with_sender({
let snap = self.snapshot();
move |sender| {
let cb = |progress| {
sender.send(Task::PrimeCaches(progress)).unwrap();
};
match snap.analysis.prime_caches(cb) {
Ok(()) => (),
Err(_canceled) => (),
self.prime_caches_queue.request_op();
if self.prime_caches_queue.should_start_op() {
self.task_pool.handle.spawn_with_sender({
let snap = self.snapshot();
move |sender| {
let cb = |progress| {
sender.send(Task::PrimeCaches(progress)).unwrap();
};
match snap.analysis.prime_caches(cb) {
Ok(()) => (),
Err(_canceled) => (),
}
}
}
});
});
}
}
fn maybe_update_diagnostics(&mut self) {
let subscriptions = self

View file

@@ -1,28 +1,29 @@
//! Bookkeeping to make sure only one long-running operation is being executed
//! at a time.
pub(crate) struct OpQueue<Args, Output> {
op_requested: Option<Args>,
pub(crate) struct OpQueue<Output> {
op_requested: bool,
op_in_progress: bool,
last_op_result: Output,
}
impl<Args, Output: Default> Default for OpQueue<Args, Output> {
impl<Output: Default> Default for OpQueue<Output> {
fn default() -> Self {
Self { op_requested: None, op_in_progress: false, last_op_result: Default::default() }
Self { op_requested: false, op_in_progress: false, last_op_result: Default::default() }
}
}
impl<Args, Output> OpQueue<Args, Output> {
pub(crate) fn request_op(&mut self, data: Args) {
self.op_requested = Some(data);
impl<Output> OpQueue<Output> {
pub(crate) fn request_op(&mut self) {
self.op_requested = true;
}
pub(crate) fn should_start_op(&mut self) -> Option<Args> {
pub(crate) fn should_start_op(&mut self) -> bool {
if self.op_in_progress {
return None;
return false;
}
self.op_in_progress = self.op_requested.is_some();
self.op_requested.take()
self.op_in_progress = self.op_requested;
self.op_requested = false;
self.op_in_progress
}
pub(crate) fn op_completed(&mut self, result: Output) {
assert!(self.op_in_progress);
@@ -30,7 +31,6 @@ impl<Args, Output> OpQueue<Args, Output> {
self.last_op_result = result;
}
#[allow(unused)]
pub(crate) fn last_op_result(&self) -> &Output {
&self.last_op_result
}
@@ -38,6 +38,6 @@ impl<Args, Output> OpQueue<Args, Output> {
self.op_in_progress
}
pub(crate) fn op_requested(&self) -> bool {
self.op_requested.is_some()
self.op_requested
}
}
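Dropping the `Args` type parameter turns a request into a plain boolean edge: callers no longer stash inputs in the queue at request time, they gather them when the operation actually starts (as `fetch_build_data_if_needed` below now does with `self.workspaces` and `self.config.cargo()`). A usage sketch against the simplified `OpQueue` above; the driver function is hypothetical, and in the real code `op_completed` is called later, when the task-pool job finishes:

```rust
// Hypothetical driver, mirroring the request/start/complete protocol.
fn tick(queue: &mut OpQueue<Vec<String>>) {
    queue.request_op(); // record that somebody wants the operation
    if queue.should_start_op() {
        // At most one operation is in flight. Inputs are gathered *now*,
        // not at request time, so they cannot go stale inside the queue.
        let result = vec!["collected".to_string()];
        queue.op_completed(result); // really sent back from a worker task
    }
}
```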

View file

@@ -5,7 +5,7 @@ use flycheck::{FlycheckConfig, FlycheckHandle};
use hir::db::DefDatabase;
use ide::Change;
use ide_db::base_db::{CrateGraph, SourceRoot, VfsPath};
use project_model::{BuildDataCollector, BuildDataResult, ProcMacroClient, ProjectWorkspace};
use project_model::{ProcMacroClient, ProjectWorkspace, WorkspaceBuildScripts};
use vfs::{file_set::FileSetConfig, AbsPath, AbsPathBuf, ChangeKind};
use crate::{
@@ -26,7 +26,7 @@ pub(crate) enum ProjectWorkspaceProgress {
pub(crate) enum BuildDataProgress {
Begin,
Report(String),
End(anyhow::Result<BuildDataResult>),
End((Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>)),
}
impl GlobalState {
@@ -113,7 +113,7 @@ impl GlobalState {
message: None,
};
if let Some(error) = self.build_data_error() {
if let Some(error) = self.fetch_build_data_error() {
status.health = lsp_ext::Health::Warning;
status.message = Some(error)
}
@@ -144,10 +144,10 @@ }
}
pub(crate) fn fetch_workspaces_request(&mut self) {
self.fetch_workspaces_queue.request_op(())
self.fetch_workspaces_queue.request_op()
}
pub(crate) fn fetch_workspaces_if_needed(&mut self) {
if self.fetch_workspaces_queue.should_start_op().is_none() {
if !self.fetch_workspaces_queue.should_start_op() {
return;
}
log::info!("will fetch workspaces");
@@ -207,14 +207,16 @@ impl GlobalState {
self.fetch_workspaces_queue.op_completed(workspaces)
}
pub(crate) fn fetch_build_data_request(&mut self, build_data_collector: BuildDataCollector) {
self.fetch_build_data_queue.request_op(build_data_collector);
pub(crate) fn fetch_build_data_request(&mut self) {
self.fetch_build_data_queue.request_op();
}
pub(crate) fn fetch_build_data_if_needed(&mut self) {
let mut build_data_collector = match self.fetch_build_data_queue.should_start_op() {
Some(it) => it,
None => return,
};
if !self.fetch_build_data_queue.should_start_op() {
return;
}
let workspaces = Arc::clone(&self.workspaces);
let config = self.config.cargo();
self.task_pool.handle.spawn_with_sender(move |sender| {
sender.send(Task::FetchBuildData(BuildDataProgress::Begin)).unwrap();
@@ -224,15 +226,26 @@ impl GlobalState {
sender.send(Task::FetchBuildData(BuildDataProgress::Report(msg))).unwrap()
}
};
let res = build_data_collector.collect(&progress);
sender.send(Task::FetchBuildData(BuildDataProgress::End(res))).unwrap();
            let mut res = Vec::new();
            for ws in workspaces.iter() {
                let ws = match ws {
                    ProjectWorkspace::Cargo { cargo, .. } => cargo,
                    ProjectWorkspace::DetachedFiles { .. } | ProjectWorkspace::Json { .. } => {
                        res.push(Ok(WorkspaceBuildScripts::default()));
                        continue;
                    }
                };
                res.push(WorkspaceBuildScripts::run(&config, ws, &progress))
            }
sender.send(Task::FetchBuildData(BuildDataProgress::End((workspaces, res)))).unwrap();
});
}
pub(crate) fn fetch_build_data_completed(
&mut self,
build_data: anyhow::Result<BuildDataResult>,
build_data: (Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>),
) {
self.fetch_build_data_queue.op_completed(Some(build_data))
self.fetch_build_data_queue.op_completed(build_data)
}
pub(crate) fn switch_workspaces(&mut self) {
@@ -242,11 +255,13 @@ impl GlobalState {
if let Some(error_message) = self.fetch_workspace_error() {
log::error!("failed to switch workspaces: {}", error_message);
if !self.workspaces.is_empty() {
// It only makes sense to switch to a partially broken workspace
// if we don't have any workspace at all yet.
return;
}
}
if let Some(error_message) = self.build_data_error() {
if let Some(error_message) = self.fetch_build_data_error() {
log::error!("failed to switch build data: {}", error_message);
}
@@ -257,21 +272,67 @@ impl GlobalState {
.filter_map(|res| res.as_ref().ok().cloned())
.collect::<Vec<_>>();
        let workspace_build_data = match self.fetch_build_data_queue.last_op_result() {
            Some(Ok(it)) => Some(it.clone()),
            None | Some(Err(_)) => None,
        };

        if *self.workspaces == workspaces && self.workspace_build_data == workspace_build_data {
            return;
        }

        fn eq_ignore_build_data<'a>(
            left: &'a ProjectWorkspace,
            right: &'a ProjectWorkspace,
        ) -> bool {
            let key = |p: &'a ProjectWorkspace| match p {
                ProjectWorkspace::Cargo {
                    cargo,
                    sysroot,
                    rustc,
                    rustc_cfg,
                    cfg_overrides,
                    build_scripts: _,
                } => Some((cargo, sysroot, rustc, rustc_cfg, cfg_overrides)),
                _ => None,
            };
            match (key(left), key(right)) {
                (Some(lk), Some(rk)) => lk == rk,
                _ => left == right,
            }
        }
let same_workspaces = workspaces.len() == self.workspaces.len()
&& workspaces
.iter()
.zip(self.workspaces.iter())
.all(|(l, r)| eq_ignore_build_data(l, r));
if same_workspaces {
let (workspaces, build_scripts) = self.fetch_build_data_queue.last_op_result();
if Arc::ptr_eq(&workspaces, &self.workspaces) {
let workspaces = workspaces
.iter()
.cloned()
.zip(build_scripts)
.map(|(mut ws, bs)| {
ws.set_build_scripts(bs.as_ref().ok().cloned().unwrap_or_default());
ws
})
.collect::<Vec<_>>();
// Workspaces are the same, but we've updated build data.
self.workspaces = Arc::new(workspaces);
} else {
// Current build scripts do not match the version of the active
// workspace, so there's nothing for us to update.
return;
}
} else {
// Here, we completely changed the workspace (Cargo.toml edit), so
// we don't care about build-script results; they are stale.
self.workspaces = Arc::new(workspaces)
}
if let FilesWatcher::Client = self.config.files().watcher {
if self.config.did_change_watched_files_dynamic_registration() {
let registration_options = lsp_types::DidChangeWatchedFilesRegistrationOptions {
watchers: workspaces
watchers: self
.workspaces
.iter()
.flat_map(|it| it.to_roots(workspace_build_data.as_ref()))
.flat_map(|ws| ws.to_roots())
.filter(|it| it.is_member)
.flat_map(|root| {
root.include.into_iter().flat_map(|it| {
@@ -303,8 +364,7 @@ impl GlobalState {
let mut change = Change::new();
let files_config = self.config.files();
let project_folders =
ProjectFolders::new(&workspaces, &files_config.exclude, workspace_build_data.as_ref());
let project_folders = ProjectFolders::new(&self.workspaces, &files_config.exclude);
if self.proc_macro_client.is_none() {
self.proc_macro_client = match self.config.proc_macro_srv() {
@@ -353,12 +413,8 @@ impl GlobalState {
}
res
};
for ws in workspaces.iter() {
crate_graph.extend(ws.to_crate_graph(
workspace_build_data.as_ref(),
self.proc_macro_client.as_ref(),
&mut load,
));
for ws in self.workspaces.iter() {
crate_graph.extend(ws.to_crate_graph(self.proc_macro_client.as_ref(), &mut load));
}
crate_graph
@@ -366,8 +422,6 @@ impl GlobalState {
change.set_crate_graph(crate_graph);
self.source_root_config = project_folders.source_root_config;
self.workspaces = Arc::new(workspaces);
self.workspace_build_data = workspace_build_data;
self.analysis_host.apply_change(change);
self.process_changes();
@@ -391,14 +445,20 @@ impl GlobalState {
Some(buf)
}
    fn build_data_error(&self) -> Option<String> {
        match self.fetch_build_data_queue.last_op_result() {
            Some(Err(err)) => {
                Some(format!("rust-analyzer failed to fetch build data: {:#}\n", err))
            }
            Some(Ok(data)) => data.error(),
            None => None,
        }
    }

    fn fetch_build_data_error(&self) -> Option<String> {
        let mut buf = String::new();
        for ws in &self.fetch_build_data_queue.last_op_result().1 {
            if let Err(err) = ws {
                stdx::format_to!(buf, "rust-analyzer failed to run custom build: {:#}\n", err);
            }
        }
        if buf.is_empty() {
            return None;
        }
        Some(buf)
    }
fn reload_flycheck(&mut self) {
@@ -452,13 +512,12 @@ impl ProjectFolders {
pub(crate) fn new(
workspaces: &[ProjectWorkspace],
global_excludes: &[AbsPathBuf],
build_data: Option<&BuildDataResult>,
) -> ProjectFolders {
let mut res = ProjectFolders::default();
let mut fsc = FileSetConfig::builder();
let mut local_filesets = vec![];
for root in workspaces.iter().flat_map(|it| it.to_roots(build_data)) {
for root in workspaces.iter().flat_map(|ws| ws.to_roots()) {
let file_set_roots: Vec<VfsPath> =
root.include.iter().cloned().map(VfsPath::from).collect();
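The `eq_ignore_build_data` comparison above is what keeps a no-op `Cargo.toml` touch from invalidating salsa caches: workspaces are compared by a key projection that deliberately skips the `build_scripts` field. The trick in isolation, on a toy struct rather than the real `ProjectWorkspace`:

```rust
#[derive(PartialEq, Debug)]
struct Ws {
    manifest: String,
    build_scripts: Vec<String>, // ignored when deciding "same workspace"
}

fn eq_ignore_build_scripts(left: &Ws, right: &Ws) -> bool {
    // Project both sides onto the fields that matter, then compare.
    let key = |w: &Ws| w.manifest.clone();
    key(left) == key(right)
}

fn main() {
    let a = Ws { manifest: "m".to_string(), build_scripts: vec![] };
    let b = Ws { manifest: "m".to_string(), build_scripts: vec!["out".to_string()] };
    assert!(eq_ignore_build_scripts(&a, &b)); // same workspace, fresher build data
    assert_ne!(a, b); // plain equality still sees the difference
}
```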

View file

@@ -11,7 +11,18 @@ pub(crate) struct TestDir {
impl TestDir {
pub(crate) fn new() -> TestDir {
let base = std::env::temp_dir().join("testdir");
let temp_dir = std::env::temp_dir();
// On macOS builders on GitHub Actions, the temp dir is a symlink, and
// that causes problems down the line. Specifically:
// * Cargo may emit different PackageIds depending on the working directory
// * rust-analyzer may fail to map LSP URIs to the correct paths.
//
// Work around this by canonicalizing. Note that we don't want to do this
// on *every* OS, as `canonicalize` itself creates problems on Windows.
#[cfg(target_os = "macos")]
let temp_dir = temp_dir.canonicalize().unwrap();
let base = temp_dir.join("testdir");
let pid = std::process::id();
static CNT: AtomicUsize = AtomicUsize::new(0);
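For context on the work-around: on macOS, `std::env::temp_dir()` typically points through a symlink (e.g. `/tmp` resolves to `/private/tmp`), so the same directory has two spellings depending on who resolved it. A minimal illustration; output is machine-specific, and on Linux the two paths usually already match:

```rust
fn main() -> std::io::Result<()> {
    let tmp = std::env::temp_dir();
    let real = tmp.canonicalize()?; // resolves symlinks
    // If these differ, Cargo and the LSP client can each pick a different
    // spelling of the same path, which is the mismatch canonicalizing avoids.
    println!("{}\n{}", tmp.display(), real.display());
    Ok(())
}
```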