Cache trait solving across queries in the same revision

Caching trait solving can do a lot for speed. Unfortunately, it also consumes a huge amount of memory. Therefore, as part of the migration to the new solver, Jack Huey disabled caching of trait solving (he made the query transparent).

This PR proposes a middle ground: do cache trait solving, but only within the same revision. This is safe because the inputs cannot change during a revision.
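
To illustrate the idea, here is a minimal sketch with stand-in types (not the actual salsa/rustc-type-ir types this patch touches): tag the cache with the revision it was built in, and throw it away once the revision moves on.

    use std::collections::HashMap;

    // Hypothetical stand-ins for salsa's revision counter and the solver cache.
    #[derive(Clone, Copy, PartialEq, Eq)]
    struct Revision(u64);

    struct RevisionCache<V> {
        built_in: Revision,
        values: HashMap<u64, V>,
    }

    impl<V> RevisionCache<V> {
        fn get_or_compute(&mut self, now: Revision, key: u64, compute: impl FnOnce() -> V) -> &V {
            // Inputs cannot change within a revision, so entries stay valid
            // exactly as long as the revision does.
            if self.built_in != now {
                self.values.clear();
                self.built_in = now;
            }
            self.values.entry(key).or_insert_with(compute)
        }
    }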

The result is hopefully much better performance for features that do a bulk of trait solving and repeat the same queries (e.g. inference followed by IDE features).

There is another limitation: results are only cached within the same thread, to avoid the need for synchronization, which would be expensive. More measurements are required to check whether it's better to use a synchronized global cache, or perhaps to keep the thread-local cache but batch multiple feature requests (highlighting, inlay hints, etc.) for the same file onto the same thread.
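
Roughly, the two shapes under consideration look like this (illustrative names only, not the patch's API):

    use std::cell::RefCell;
    use std::sync::Mutex;

    struct SolverCache; // stand-in for the real trait-solver cache

    // What this PR does: one cache per thread, no locking on lookup.
    thread_local! {
        static LOCAL_CACHE: RefCell<Option<SolverCache>> = const { RefCell::new(None) };
    }

    // The alternative to measure against: one shared cache behind a lock,
    // paying synchronization cost on every lookup.
    static SHARED_CACHE: Mutex<Option<SolverCache>> = Mutex::new(None);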

Alongside the actual cache we store the revision, because we need to verify it on each access (we can't eagerly clear caches when the revision is incremented), and also a nonce identifying the db instance, to prevent multiple dbs from interleaving (this is mostly relevant in tests, although injected highlighting also uses a fresh db, so it may be better to move that to a separate thread).
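
Concretely, each access validates the thread-local cache along these lines (a simplified, standalone version of `with_cache` from the diff below, again with stand-in types):

    #[derive(Clone, Copy, PartialEq, Eq)]
    struct Nonce(usize);

    #[derive(Clone, Copy, PartialEq, Eq)]
    struct Revision(u64);

    #[derive(Default)]
    struct SolverCache; // stand-in for rustc_type_ir's GlobalCache

    struct Cache {
        cache: SolverCache,
        revision: Revision,
        db_nonce: Nonce,
    }

    // Reuse the cached solver state only while both the revision and the db
    // nonce still match; otherwise start over with an empty cache.
    fn validated(handle: &mut Option<Cache>, db_nonce: Nonce, revision: Revision) -> &mut SolverCache {
        let stale = !matches!(&*handle, Some(c) if c.revision == revision && c.db_nonce == db_nonce);
        if stale {
            *handle = Some(Cache { cache: SolverCache::default(), revision, db_nonce });
        }
        &mut handle.as_mut().unwrap().cache
    }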

This "games" analysis-stats to both be way faster and use way more memory; the former is because analysis-stats doesn't increment revisions, therefore all queries share the cache and hit ratio is way too good, the latter is because analysis-stats doesn't increment revisions and therefore the cache isn't cleared. Both are not representative of a typical IDE scenario.
Chayim Refael Friedman 2025-08-24 16:44:28 +03:00
parent e6cd085099
commit 1c8a07cfd6
7 changed files with 140 additions and 66 deletions


@@ -7,7 +7,12 @@ pub use salsa_macros;
 mod change;
 mod input;
 
-use std::{cell::RefCell, hash::BuildHasherDefault, panic, sync::Once};
+use std::{
+    cell::RefCell,
+    hash::BuildHasherDefault,
+    panic,
+    sync::{Once, atomic::AtomicUsize},
+};
 
 pub use crate::{
     change::FileChange,
@@ -328,6 +333,27 @@ pub trait SourceDatabase: salsa::Database {
 
     #[doc(hidden)]
     fn crates_map(&self) -> Arc<CratesMap>;
+
+    fn nonce_and_revision(&self) -> (Nonce, salsa::Revision);
+}
+
+static NEXT_NONCE: AtomicUsize = AtomicUsize::new(0);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Nonce(usize);
+
+impl Default for Nonce {
+    #[inline]
+    fn default() -> Self {
+        Nonce::new()
+    }
+}
+
+impl Nonce {
+    #[inline]
+    pub fn new() -> Nonce {
+        Nonce(NEXT_NONCE.fetch_add(1, std::sync::atomic::Ordering::SeqCst))
+    }
 }
 
 /// Crate related data shared by the whole workspace.


@@ -3,7 +3,7 @@
 use std::{fmt, panic, sync::Mutex};
 
 use base_db::{
-    Crate, CrateGraphBuilder, CratesMap, FileSourceRootInput, FileText, RootQueryDb,
+    Crate, CrateGraphBuilder, CratesMap, FileSourceRootInput, FileText, Nonce, RootQueryDb,
     SourceDatabase, SourceRoot, SourceRootId, SourceRootInput,
 };
 use hir_expand::{InFile, files::FilePosition};
@@ -20,12 +20,12 @@ use crate::{
 };
 
 #[salsa_macros::db]
-#[derive(Clone)]
 pub(crate) struct TestDB {
     storage: salsa::Storage<Self>,
     files: Arc<base_db::Files>,
     crates_map: Arc<CratesMap>,
     events: Arc<Mutex<Option<Vec<salsa::Event>>>>,
+    nonce: Nonce,
 }
 
 impl Default for TestDB {
@@ -44,6 +44,7 @@ impl Default for TestDB {
             events,
             files: Default::default(),
             crates_map: Default::default(),
+            nonce: Nonce::new(),
         };
         this.set_expand_proc_attr_macros_with_durability(true, Durability::HIGH);
         // This needs to be here otherwise `CrateGraphBuilder` panics.
@@ -53,6 +54,18 @@ impl Default for TestDB {
     }
 }
 
+impl Clone for TestDB {
+    fn clone(&self) -> Self {
+        Self {
+            storage: self.storage.clone(),
+            files: self.files.clone(),
+            crates_map: self.crates_map.clone(),
+            events: self.events.clone(),
+            nonce: Nonce::new(),
+        }
+    }
+}
+
 #[salsa_macros::db]
 impl salsa::Database for TestDB {}
 
@@ -117,6 +130,10 @@ impl SourceDatabase for TestDB {
     fn crates_map(&self) -> Arc<CratesMap> {
         self.crates_map.clone()
     }
+
+    fn nonce_and_revision(&self) -> (Nonce, salsa::Revision) {
+        (self.nonce, salsa::plumbing::ZalsaDatabase::zalsa(self).current_revision())
+    }
 }
 
 impl TestDB {


@@ -88,54 +88,52 @@ pub(crate) use closure::{CaptureKind, CapturedItem, CapturedItemWithoutTy};
 
 /// The entry point of type inference.
 pub(crate) fn infer_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Arc<InferenceResult> {
-    crate::next_solver::with_new_cache(|| {
-        let _p = tracing::info_span!("infer_query").entered();
-        let resolver = def.resolver(db);
-        let body = db.body(def);
-        let mut ctx = InferenceContext::new(db, def, &body, resolver);
+    let _p = tracing::info_span!("infer_query").entered();
+    let resolver = def.resolver(db);
+    let body = db.body(def);
+    let mut ctx = InferenceContext::new(db, def, &body, resolver);
 
-        match def {
-            DefWithBodyId::FunctionId(f) => {
-                ctx.collect_fn(f);
-            }
-            DefWithBodyId::ConstId(c) => ctx.collect_const(c, &db.const_signature(c)),
-            DefWithBodyId::StaticId(s) => ctx.collect_static(&db.static_signature(s)),
-            DefWithBodyId::VariantId(v) => {
-                ctx.return_ty = TyBuilder::builtin(
-                    match db.enum_signature(v.lookup(db).parent).variant_body_type() {
-                        hir_def::layout::IntegerType::Pointer(signed) => match signed {
-                            true => BuiltinType::Int(BuiltinInt::Isize),
-                            false => BuiltinType::Uint(BuiltinUint::Usize),
-                        },
-                        hir_def::layout::IntegerType::Fixed(size, signed) => match signed {
-                            true => BuiltinType::Int(match size {
-                                Integer::I8 => BuiltinInt::I8,
-                                Integer::I16 => BuiltinInt::I16,
-                                Integer::I32 => BuiltinInt::I32,
-                                Integer::I64 => BuiltinInt::I64,
-                                Integer::I128 => BuiltinInt::I128,
-                            }),
-                            false => BuiltinType::Uint(match size {
-                                Integer::I8 => BuiltinUint::U8,
-                                Integer::I16 => BuiltinUint::U16,
-                                Integer::I32 => BuiltinUint::U32,
-                                Integer::I64 => BuiltinUint::U64,
-                                Integer::I128 => BuiltinUint::U128,
-                            }),
-                        },
-                    },
-                );
-            }
+    match def {
+        DefWithBodyId::FunctionId(f) => {
+            ctx.collect_fn(f);
         }
+        DefWithBodyId::ConstId(c) => ctx.collect_const(c, &db.const_signature(c)),
+        DefWithBodyId::StaticId(s) => ctx.collect_static(&db.static_signature(s)),
+        DefWithBodyId::VariantId(v) => {
+            ctx.return_ty = TyBuilder::builtin(
+                match db.enum_signature(v.lookup(db).parent).variant_body_type() {
+                    hir_def::layout::IntegerType::Pointer(signed) => match signed {
+                        true => BuiltinType::Int(BuiltinInt::Isize),
+                        false => BuiltinType::Uint(BuiltinUint::Usize),
+                    },
+                    hir_def::layout::IntegerType::Fixed(size, signed) => match signed {
+                        true => BuiltinType::Int(match size {
+                            Integer::I8 => BuiltinInt::I8,
+                            Integer::I16 => BuiltinInt::I16,
+                            Integer::I32 => BuiltinInt::I32,
+                            Integer::I64 => BuiltinInt::I64,
+                            Integer::I128 => BuiltinInt::I128,
+                        }),
+                        false => BuiltinType::Uint(match size {
+                            Integer::I8 => BuiltinUint::U8,
+                            Integer::I16 => BuiltinUint::U16,
+                            Integer::I32 => BuiltinUint::U32,
+                            Integer::I64 => BuiltinUint::U64,
+                            Integer::I128 => BuiltinUint::U128,
+                        }),
+                    },
+                },
+            );
+        }
+    }
 
-        ctx.infer_body();
-        ctx.infer_mut_body();
-        ctx.infer_closures();
+    ctx.infer_body();
+    ctx.infer_mut_body();
+    ctx.infer_closures();
 
-        Arc::new(ctx.resolve_all())
-    })
+    Arc::new(ctx.resolve_all())
 }
 
 pub(crate) fn infer_cycle_result(_: &dyn HirDatabase, _: DefWithBodyId) -> Arc<InferenceResult> {


@@ -2168,9 +2168,7 @@ pub fn mir_body_query(db: &dyn HirDatabase, def: DefWithBodyId) -> Result<Arc<Mi
     let _p = tracing::info_span!("mir_body_query", ?detail).entered();
     let body = db.body(def);
     let infer = db.infer(def);
-    let mut result = crate::next_solver::with_new_cache(|| {
-        lower_to_mir(db, def, &body, &infer, body.body_expr)
-    })?;
+    let mut result = lower_to_mir(db, def, &body, &infer, body.body_expr)?;
     result.shrink_to_fit();
     Ok(Arc::new(result))
 }


@@ -2149,37 +2149,48 @@ TrivialTypeTraversalImpls! {
     Placeholder<BoundVar>,
 }
 
-pub(crate) use tls_cache::with_new_cache;
-
 mod tls_cache {
     use crate::db::HirDatabase;
 
     use super::DbInterner;
+    use base_db::Nonce;
     use rustc_type_ir::search_graph::GlobalCache;
+    use salsa::Revision;
    use std::cell::RefCell;
 
-    scoped_tls::scoped_thread_local!(static GLOBAL_CACHE: RefCell<rustc_type_ir::search_graph::GlobalCache<DbInterner<'static>>>);
+    struct Cache {
+        cache: GlobalCache<DbInterner<'static>>,
+        revision: Revision,
+        db_nonce: Nonce,
+    }
 
-    pub(crate) fn with_new_cache<T>(f: impl FnOnce() -> T) -> T {
-        GLOBAL_CACHE.set(&RefCell::new(GlobalCache::default()), f)
+    thread_local! {
+        static GLOBAL_CACHE: RefCell<Option<Cache>> = const { RefCell::new(None) };
     }
 
     pub(super) fn with_cache<'db, T>(
-        _db: &'db dyn HirDatabase,
+        db: &'db dyn HirDatabase,
         f: impl FnOnce(&mut GlobalCache<DbInterner<'db>>) -> T,
     ) -> T {
-        // SAFETY: No idea
-        let call = move |slot: &RefCell<_>| {
+        GLOBAL_CACHE.with_borrow_mut(|handle| {
+            let (db_nonce, revision) = db.nonce_and_revision();
+            let handle = match handle {
+                Some(handle) => {
+                    if handle.revision != revision || db_nonce != handle.db_nonce {
+                        *handle = Cache { cache: GlobalCache::default(), revision, db_nonce };
+                    }
+                    handle
+                }
+                None => handle.insert(Cache { cache: GlobalCache::default(), revision, db_nonce }),
+            };
+            // SAFETY: No idea
             f(unsafe {
                 std::mem::transmute::<
                     &mut GlobalCache<DbInterner<'static>>,
                     &mut GlobalCache<DbInterner<'db>>,
-                >(&mut *slot.borrow_mut())
+                >(&mut handle.cache)
             })
-        };
-        if GLOBAL_CACHE.is_set() {
-            GLOBAL_CACHE.with(call)
-        } else {
-            GLOBAL_CACHE.set(&RefCell::new(GlobalCache::default()), || GLOBAL_CACHE.with(call))
-        }
+        })
     }
 }


@@ -3,8 +3,8 @@
 use std::{fmt, panic, sync::Mutex};
 
 use base_db::{
-    CrateGraphBuilder, CratesMap, FileSourceRootInput, FileText, RootQueryDb, SourceDatabase,
-    SourceRoot, SourceRootId, SourceRootInput,
+    CrateGraphBuilder, CratesMap, FileSourceRootInput, FileText, Nonce, RootQueryDb,
+    SourceDatabase, SourceRoot, SourceRootId, SourceRootInput,
 };
 use hir_def::{ModuleId, db::DefDatabase, nameres::crate_def_map};
@@ -17,12 +17,12 @@ use test_utils::extract_annotations;
 use triomphe::Arc;
 
 #[salsa_macros::db]
-#[derive(Clone)]
 pub(crate) struct TestDB {
     storage: salsa::Storage<Self>,
     files: Arc<base_db::Files>,
     crates_map: Arc<CratesMap>,
     events: Arc<Mutex<Option<Vec<salsa::Event>>>>,
+    nonce: Nonce,
 }
 
 impl Default for TestDB {
@@ -41,6 +41,7 @@ impl Default for TestDB {
             events,
             files: Default::default(),
             crates_map: Default::default(),
+            nonce: Nonce::new(),
         };
         this.set_expand_proc_attr_macros_with_durability(true, Durability::HIGH);
         // This needs to be here otherwise `CrateGraphBuilder` panics.
@@ -50,6 +51,18 @@ impl Default for TestDB {
     }
 }
 
+impl Clone for TestDB {
+    fn clone(&self) -> Self {
+        Self {
+            storage: self.storage.clone(),
+            files: self.files.clone(),
+            crates_map: self.crates_map.clone(),
+            events: self.events.clone(),
+            nonce: Nonce::new(),
+        }
+    }
+}
+
 impl fmt::Debug for TestDB {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("TestDB").finish()
@@ -109,6 +122,10 @@ impl SourceDatabase for TestDB {
     fn crates_map(&self) -> Arc<CratesMap> {
         self.crates_map.clone()
     }
+
+    fn nonce_and_revision(&self) -> (Nonce, salsa::Revision) {
+        (self.nonce, salsa::plumbing::ZalsaDatabase::zalsa(self).current_revision())
+    }
 }
 
 #[salsa_macros::db]


@@ -51,7 +51,7 @@ use salsa::Durability;
 use std::{fmt, mem::ManuallyDrop};
 
 use base_db::{
-    CrateGraphBuilder, CratesMap, FileSourceRootInput, FileText, Files, RootQueryDb,
+    CrateGraphBuilder, CratesMap, FileSourceRootInput, FileText, Files, Nonce, RootQueryDb,
     SourceDatabase, SourceRoot, SourceRootId, SourceRootInput, query_group,
 };
 use hir::{
@@ -83,6 +83,7 @@ pub struct RootDatabase {
     storage: ManuallyDrop<salsa::Storage<Self>>,
     files: Arc<Files>,
     crates_map: Arc<CratesMap>,
+    nonce: Nonce,
 }
 
 impl std::panic::RefUnwindSafe for RootDatabase {}
@@ -102,6 +103,7 @@ impl Clone for RootDatabase {
             storage: self.storage.clone(),
             files: self.files.clone(),
             crates_map: self.crates_map.clone(),
+            nonce: Nonce::new(),
         }
     }
 }
@@ -165,6 +167,10 @@ impl SourceDatabase for RootDatabase {
     fn crates_map(&self) -> Arc<CratesMap> {
         self.crates_map.clone()
     }
+
+    fn nonce_and_revision(&self) -> (Nonce, salsa::Revision) {
+        (self.nonce, salsa::plumbing::ZalsaDatabase::zalsa(self).current_revision())
+    }
 }
 
 impl Default for RootDatabase {
@@ -179,6 +185,7 @@ impl RootDatabase {
             storage: ManuallyDrop::new(salsa::Storage::default()),
             files: Default::default(),
             crates_map: Default::default(),
+            nonce: Nonce::new(),
         };
         // This needs to be here otherwise `CrateGraphBuilder` will panic.
         db.set_all_crates(Arc::new(Box::new([])));