Mirror of https://github.com/salsa-rs/salsa.git (synced 2025-07-07 21:35:17 +00:00)

Enable Garbage Collection for Interned Values (#602)

* remove table-wide dependencies
* add plumbing to reuse interned slots
* record durabilities on interned values
* appease clippy
* remove immortal interned value logic
* pass correct revision when tracking interned reads
* force new revision when resetting interned values
* avoid unnecessary calls to `Database::zalsa`
* add log events for value internment
* Only log event kind because thread id can differ between runs/computers
* cargo fmt

Co-authored-by: Micha Reiser <micha@reiser.io>

Parent: 1d1523b8b8
Commit: a86db59e99

27 changed files with 435 additions and 305 deletions
@@ -146,9 +146,6 @@ It combines an [`IngredientIndex`] with a `key_index`, which is a `salsa::Id`:
 {{#include ../../../src/key.rs:DatabaseKeyIndex}}
 ```

-A `DependencyIndex` is similar, but the `key_index` is optional.
-This is used when we sometimes wish to refer to the ingredient as a whole, and not any specific value within the ingredient.
-
 These kinds of indices are used to store connetions between ingredients.
 For example, each memoized value has to track its inputs.
 Those inputs are stored as dependency indices.
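For orientation, the index type this commit consolidates everything onto pairs an ingredient index with a key id. A minimal sketch with hypothetical stand-in newtypes for `IngredientIndex` and `Id` (the real definitions live in `src/key.rs` and `src/zalsa.rs`, shown later in this diff):

```rust
// Hypothetical stand-ins for salsa's IngredientIndex and Id newtypes.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct IngredientIndex(u32);

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct Id(u32);

/// Identifies one value within one ingredient, mirroring the shape of
/// `DatabaseKeyIndex` as defined in src/key.rs in this diff.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DatabaseKeyIndex {
    ingredient_index: IngredientIndex,
    key_index: Id,
}

impl DatabaseKeyIndex {
    fn new(ingredient_index: IngredientIndex, key_index: Id) -> Self {
        Self { ingredient_index, key_index }
    }

    fn ingredient_index(self) -> IngredientIndex {
        self.ingredient_index
    }

    fn key_index(self) -> Id {
        self.key_index
    }
}

fn main() {
    // A memoized value records each of its inputs and outputs as such an index.
    let dependency = DatabaseKeyIndex::new(IngredientIndex(3), Id(400));
    assert_eq!(dependency.ingredient_index(), IngredientIndex(3));
    assert_eq!(dependency.key_index(), Id(400));
}
```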
@@ -138,7 +138,7 @@ impl<A: Accumulator> Ingredient for IngredientImpl<A> {
     ) {
     }
 
-    fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         fmt_index(A::DEBUG_NAME, index, fmt)
     }
 
@@ -2,7 +2,6 @@ use std::ops::Not;
 
 use super::zalsa_local::{QueryEdges, QueryOrigin, QueryRevisions};
 use crate::accumulator::accumulated_map::AtomicInputAccumulatedValues;
-use crate::key::OutputDependencyIndex;
 use crate::tracked_struct::{DisambiguatorMap, IdentityHash, IdentityMap};
 use crate::zalsa_local::QueryEdge;
 use crate::{
@@ -10,7 +9,7 @@ use crate::{
     cycle::CycleHeads,
     durability::Durability,
     hash::FxIndexSet,
-    key::{DatabaseKeyIndex, InputDependencyIndex},
+    key::DatabaseKeyIndex,
     tracked_struct::Disambiguator,
     Revision,
 };
@@ -81,7 +80,7 @@ impl ActiveQuery {
 
     pub(super) fn add_read(
         &mut self,
-        input: InputDependencyIndex,
+        input: DatabaseKeyIndex,
         durability: Durability,
         revision: Revision,
         accumulated: InputAccumulatedValues,
@@ -96,7 +95,7 @@ impl ActiveQuery {
 
     pub(super) fn add_read_simple(
         &mut self,
-        input: InputDependencyIndex,
+        input: DatabaseKeyIndex,
         durability: Durability,
         revision: Revision,
     ) {
@@ -118,12 +117,12 @@ impl ActiveQuery {
     }
 
     /// Adds a key to our list of outputs.
-    pub(super) fn add_output(&mut self, key: OutputDependencyIndex) {
+    pub(super) fn add_output(&mut self, key: DatabaseKeyIndex) {
         self.input_outputs.insert(QueryEdge::Output(key));
     }
 
     /// True if the given key was output by this query.
-    pub(super) fn is_output(&self, key: OutputDependencyIndex) -> bool {
+    pub(super) fn is_output(&self, key: DatabaseKeyIndex) -> bool {
         self.input_outputs.contains(&QueryEdge::Output(key))
     }
 
@@ -56,7 +56,7 @@ pub trait Database: Send + AsDynDatabase + Any + ZalsaDatabase {
     /// which are the fine-grained components we use to track data. This is intended
     /// for debugging and the contents of the returned string are not semver-guaranteed.
     ///
-    /// Ingredient indices can be extracted from [`DependencyIndex`](`crate::DependencyIndex`) values.
+    /// Ingredient indices can be extracted from [`DatabaseKeyIndex`](`crate::DatabaseKeyIndex`) values.
     fn ingredient_debug_name(&self, ingredient_index: IngredientIndex) -> Cow<'_, str> {
         Cow::Borrowed(
             self.zalsa()
@@ -35,6 +35,17 @@ enum DurabilityVal {
     High = 2,
 }
 
+impl From<u8> for DurabilityVal {
+    fn from(value: u8) -> Self {
+        match value {
+            0 => DurabilityVal::Low,
+            1 => DurabilityVal::Medium,
+            2 => DurabilityVal::High,
+            _ => panic!("invalid durability"),
+        }
+    }
+}
+
 impl Durability {
     /// Low durability: things that change frequently.
     ///
@@ -68,6 +79,14 @@ impl Durability {
     pub(crate) fn index(self) -> usize {
         self.0 as usize
     }
 
+    pub(crate) fn as_u8(self) -> u8 {
+        self.0 as u8
+    }
+
+    pub(crate) fn from_u8(value: u8) -> Self {
+        Self(DurabilityVal::from(value))
+    }
 }
 
 impl Default for Durability {
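The new `as_u8`/`from_u8` pair exists so an interned value's durability can live in an `AtomicU8` and be merged with `fetch_max`, as the interned-ingredient changes later in this diff do. A minimal sketch of that round trip, using a stand-in enum rather than salsa's actual `Durability` type:

```rust
use std::sync::atomic::{AtomicU8, Ordering};

// Stand-in for salsa's durability levels (Low = 0, Medium = 1, High = 2).
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Durability {
    Low = 0,
    Medium = 1,
    High = 2,
}

impl Durability {
    fn as_u8(self) -> u8 {
        self as u8
    }

    fn from_u8(value: u8) -> Self {
        match value {
            0 => Durability::Low,
            1 => Durability::Medium,
            2 => Durability::High,
            _ => panic!("invalid durability"),
        }
    }
}

fn main() {
    // Durability recorded when the value was first interned.
    let stored = AtomicU8::new(Durability::Low.as_u8());

    // A later query with higher durability interns the same value again:
    // `fetch_max` keeps the maximum of the stored and the new durability.
    let current = Durability::High;
    let previous = stored.fetch_max(current.as_u8(), Ordering::AcqRel);

    // The read dependency is reported with the max of the old and new durability.
    let reported = Durability::from_u8(previous).max(current);
    assert_eq!(reported, Durability::High);
    assert_eq!(Durability::from_u8(stored.load(Ordering::Acquire)), Durability::High);
}
```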
src/event.rs (27 changed lines)
@@ -1,9 +1,6 @@
 use std::thread::ThreadId;
 
-use crate::{
-    key::DatabaseKeyIndex,
-    key::{InputDependencyIndex, OutputDependencyIndex},
-};
+use crate::{key::DatabaseKeyIndex, Id, Revision};
 
 /// The `Event` struct identifies various notable things that can
 /// occur during salsa execution. Instances of this struct are given
@@ -76,7 +73,7 @@ pub enum EventKind {
         execute_key: DatabaseKeyIndex,
 
         /// Key for the query that is no longer output
-        output_key: OutputDependencyIndex,
+        output_key: DatabaseKeyIndex,
     },
 
     /// Tracked structs or memoized data were discarded (freed).
@@ -91,6 +88,24 @@ pub enum EventKind {
         executor_key: DatabaseKeyIndex,
 
         /// Accumulator that was accumulated into
-        accumulator: InputDependencyIndex,
+        accumulator: DatabaseKeyIndex,
     },
+
+    /// Indicates that a value was newly interned.
+    DidInternValue {
+        // The ID of the interned value.
+        id: Id,
+
+        // The revision the value was interned in.
+        revision: Revision,
+    },
+
+    /// Indicates that a previously interned value was read in a new revision.
+    DidReinternValue {
+        // The ID of the interned value.
+        id: Id,
+
+        // The revision the value was interned in.
+        revision: Revision,
+    },
 }
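The new `DidInternValue` and `DidReinternValue` variants reach user code through `Database::salsa_event`. A hedged sketch of a database that reacts to them; the `#[salsa::db]`/`salsa::Storage` boilerplate follows the pattern used in this repository's tests and may differ across salsa versions:

```rust
use salsa::{Event, EventKind};

#[salsa::db]
#[derive(Default)]
struct InternWatchingDatabase {
    storage: salsa::Storage<Self>,
}

#[salsa::db]
impl salsa::Database for InternWatchingDatabase {
    fn salsa_event(&self, event: &dyn Fn() -> Event) {
        // The closure is lazy; only materialize the event to inspect its kind.
        match event().kind {
            EventKind::DidInternValue { id, revision } => {
                eprintln!("interned {id:?} in revision {revision:?}");
            }
            EventKind::DidReinternValue { id, revision } => {
                eprintln!("re-read interned {id:?} in revision {revision:?}");
            }
            _ => {}
        }
    }
}
```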
@@ -160,11 +160,8 @@ where
         }
     }
 
-    pub fn database_key_index(&self, k: Id) -> DatabaseKeyIndex {
-        DatabaseKeyIndex {
-            ingredient_index: self.index,
-            key_index: k,
-        }
+    pub fn database_key_index(&self, key: Id) -> DatabaseKeyIndex {
+        DatabaseKeyIndex::new(self.index, key)
     }
 
     pub fn set_capacity(&mut self, capacity: usize) {
@@ -302,7 +299,7 @@ where
         std::mem::take(&mut self.deleted_entries);
     }
 
-    fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         fmt_index(C::DEBUG_NAME, index, fmt)
     }
 
@@ -54,9 +54,9 @@ where
                 continue;
             }
 
-            let ingredient = zalsa.lookup_ingredient(k.ingredient_index);
+            let ingredient = zalsa.lookup_ingredient(k.ingredient_index());
             // Extend `output` with any values accumulated by `k`.
-            let (accumulated_map, input) = ingredient.accumulated(db, k.key_index);
+            let (accumulated_map, input) = ingredient.accumulated(db, k.key_index());
             if let Some(accumulated_map) = accumulated_map {
                 accumulated_map.extend_with_accumulated(accumulator.index(), &mut output);
             }
@@ -71,7 +71,7 @@ where
             // output vector, we want to push in execution order, so reverse order to
             // ensure the first child that was executed will be the first child popped
             // from the stack.
-            let Some(origin) = ingredient.origin(db, k.key_index) else {
+            let Some(origin) = ingredient.origin(db, k.key_index()) else {
                 continue;
             };
 
@@ -1,7 +1,7 @@
 use super::{memo::Memo, Configuration, IngredientImpl};
 use crate::{
-    hash::FxHashSet, key::OutputDependencyIndex, zalsa::Zalsa, zalsa_local::QueryRevisions,
-    AsDynDatabase as _, Database, DatabaseKeyIndex, Event, EventKind,
+    hash::FxHashSet, zalsa::Zalsa, zalsa_local::QueryRevisions, AsDynDatabase as _, Database,
+    DatabaseKeyIndex, Event, EventKind,
 };
 
 impl<C> IngredientImpl<C>
@@ -38,7 +38,7 @@ where
         // Remove the outputs that are no longer present in the current revision
         // to prevent that the next revision is seeded with a id mapping that no longer exists.
         revisions.tracked_struct_ids.retain(|&k, &mut value| {
-            !old_outputs.contains(&OutputDependencyIndex::new(k.ingredient_index(), value))
+            !old_outputs.contains(&DatabaseKeyIndex::new(k.ingredient_index(), value))
         });
     }
 
@@ -51,7 +51,7 @@ where
         zalsa: &Zalsa,
         db: &C::DbView,
         key: DatabaseKeyIndex,
-        output: OutputDependencyIndex,
+        output: DatabaseKeyIndex,
         provisional: bool,
     ) {
         db.salsa_event(&|| {
@@ -29,7 +29,7 @@ where
         let (zalsa, zalsa_local) = db.zalsas();
         let revision_now = zalsa.current_revision();
         let database_key_index = active_query.database_key_index;
-        let id = database_key_index.key_index;
+        let id = database_key_index.key_index();
 
         tracing::info!("{:?}: executing query", database_key_index);
 
@@ -30,7 +30,7 @@ where
         self.lru.record_use(id);
 
         zalsa_local.report_tracked_read(
-            self.database_key_index(id).into(),
+            self.database_key_index(id),
             durability,
             changed_at,
             match &memo.revisions.accumulated {
@@ -130,7 +130,7 @@ where
         }
         // no provisional value; create/insert/return initial provisional value
         return self
-            .initial_value(db, database_key_index.key_index)
+            .initial_value(db, database_key_index.key_index())
             .map(|initial_value| {
                 tracing::debug!(
                     "hit cycle at {database_key_index:#?}, \
@@ -250,8 +250,8 @@ where
         );
         if (&memo.revisions.cycle_heads).into_iter().any(|cycle_head| {
             zalsa
-                .lookup_ingredient(cycle_head.ingredient_index)
-                .is_provisional_cycle_head(db.as_dyn_database(), cycle_head.key_index)
+                .lookup_ingredient(cycle_head.ingredient_index())
+                .is_provisional_cycle_head(db.as_dyn_database(), cycle_head.key_index())
         }) {
             return false;
         }
@@ -183,13 +183,13 @@ impl<V> Memo<V> {
             .into_iter()
             .filter(|&head| head != database_key_index)
             .any(|head| {
-                let ingredient = zalsa.lookup_ingredient(head.ingredient_index);
-                if !ingredient.is_provisional_cycle_head(db, head.key_index) {
+                let ingredient = zalsa.lookup_ingredient(head.ingredient_index());
+                if !ingredient.is_provisional_cycle_head(db, head.key_index()) {
                     // This cycle is already finalized, so we don't need to wait on it;
                     // keep looping through cycle heads.
                     retry = true;
                     false
-                } else if ingredient.wait_for(db, head.key_index) {
+                } else if ingredient.wait_for(db, head.key_index()) {
                     // There's a new memo available for the cycle head; fetch our own
                     // updated memo and see if it's still provisional or if the cycle
                     // has resolved.
@@ -41,8 +41,7 @@ where
         //
         // Now, if We invoke Q3 first, We get one result for Q2, but if We invoke Q4 first, We get a different value. That's no good.
         let database_key_index = <C::Input<'db>>::database_key_index(db.as_dyn_database(), key);
-        let dependency_index = database_key_index.into();
-        if !zalsa_local.is_output_of_active_query(dependency_index) {
+        if !zalsa_local.is_output_of_active_query(database_key_index) {
             panic!("can only use `specify` on salsa structs created during the current tracked fn");
         }
 
@@ -105,7 +104,7 @@ where
 
         // Record that the current query *specified* a value for this cell.
         let database_key_index = self.database_key_index(key);
-        zalsa_local.add_output(database_key_index.into());
+        zalsa_local.add_output(database_key_index);
     }
 
     /// Invoked when the query `executor` has been validated as having green inputs
@@ -141,7 +141,7 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync {
         );
     }
 
-    fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result;
+    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result;
 }
 
 impl dyn Ingredient {
@@ -177,14 +177,6 @@ impl dyn Ingredient {
 }
 
 /// A helper function to show human readable fmt.
-pub(crate) fn fmt_index(
-    debug_name: &str,
-    id: Option<Id>,
-    fmt: &mut fmt::Formatter<'_>,
-) -> fmt::Result {
-    if let Some(i) = id {
-        write!(fmt, "{debug_name}({i:?})")
-    } else {
-        write!(fmt, "{debug_name}()")
-    }
+pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+    write!(fmt, "{debug_name}({id:?})")
 }
src/input.rs (11 changed lines)
@@ -15,7 +15,7 @@ use crate::{
     id::{AsId, FromIdWithDb},
     ingredient::{fmt_index, Ingredient},
     input::singleton::{Singleton, SingletonChoice},
-    key::{DatabaseKeyIndex, InputDependencyIndex},
+    key::DatabaseKeyIndex,
     plumbing::{Jar, Stamp},
     table::{memo::MemoTable, sync::SyncTable, Slot, Table},
     zalsa::{IngredientIndex, Zalsa},
@@ -96,10 +96,7 @@ impl<C: Configuration> IngredientImpl<C> {
     }
 
     pub fn database_key_index(&self, id: C::Struct) -> DatabaseKeyIndex {
-        DatabaseKeyIndex {
-            ingredient_index: self.ingredient_index,
-            key_index: id.as_id(),
-        }
+        DatabaseKeyIndex::new(self.ingredient_index, id.as_id())
     }
 
     pub fn new_input(&self, db: &dyn Database, fields: C::Fields, stamps: C::Stamps) -> C::Struct {
@@ -177,7 +174,7 @@ impl<C: Configuration> IngredientImpl<C> {
         let value = Self::data(zalsa, id);
         let stamp = &value.stamps[field_index];
         zalsa_local.report_tracked_read_simple(
-            InputDependencyIndex::new(field_ingredient_index, id),
+            DatabaseKeyIndex::new(field_ingredient_index, id),
             stamp.durability,
             stamp.changed_at,
         );
@@ -261,7 +258,7 @@ impl<C: Configuration> Ingredient for IngredientImpl<C> {
         );
     }
 
-    fn fmt_index(&self, index: Option<Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+    fn fmt_index(&self, index: Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         fmt_index(C::DEBUG_NAME, index, fmt)
     }
 
@@ -85,7 +85,7 @@ where
     ) {
     }
 
-    fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         fmt_index(C::FIELD_DEBUG_NAMES[self.field_index], index, fmt)
     }
 
src/interned.rs (195 changed lines)
@@ -3,19 +3,20 @@ use dashmap::SharedValue;
 use crate::durability::Durability;
 use crate::function::VerifyResult;
 use crate::ingredient::fmt_index;
-use crate::key::InputDependencyIndex;
 use crate::plumbing::{IngredientIndices, Jar};
+use crate::revision::AtomicRevision;
 use crate::table::memo::MemoTable;
 use crate::table::sync::SyncTable;
 use crate::table::Slot;
 use crate::zalsa::{IngredientIndex, Zalsa};
 use crate::zalsa_local::QueryOrigin;
-use crate::{Database, DatabaseKeyIndex, Id};
+use crate::{Database, DatabaseKeyIndex, Event, EventKind, Id};
 use std::any::TypeId;
 use std::fmt;
 use std::hash::{BuildHasher, Hash, Hasher};
 use std::marker::PhantomData;
 use std::path::{Path, PathBuf};
+use std::sync::atomic::{AtomicU8, Ordering};
 
 use super::hash::FxDashMap;
 use super::ingredient::Ingredient;
@@ -61,12 +62,6 @@ pub struct IngredientImpl<C: Configuration> {
     ///
     /// Deadlock requirement: We access `value_map` while holding lock on `key_map`, but not vice versa.
     key_map: FxDashMap<C::Fields<'static>, Id>,
-
-    /// Stores the revision when this interned ingredient was last cleared.
-    /// You can clear an interned table at any point, deleting all its entries,
-    /// but that will make anything dependent on those entries dirty and in need
-    /// of being recomputed.
-    reset_at: Revision,
 }
 
 /// Struct storing the interned fields.
@@ -77,12 +72,29 @@ where
     fields: C::Fields<'static>,
     memos: MemoTable,
     syncs: SyncTable,
+
+    /// The revision the value was first interned in.
+    first_interned_at: Revision,
+
+    /// The most recent interned revision.
+    last_interned_at: AtomicRevision,
+
+    /// The minimum durability of all inputs consumed by the creator
+    /// query prior to creating this tracked struct. If any of those
+    /// inputs changes, then the creator query may create this struct
+    /// with different values.
+    durability: AtomicU8,
 }
 
 impl<C> Value<C>
 where
     C: Configuration,
 {
+    // Loads the durability of this interned struct.
+    fn durability(&self) -> Durability {
+        Durability::from_u8(self.durability.load(Ordering::Acquire))
+    }
+
     /// Fields of this interned struct.
     #[cfg(feature = "salsa_unstable")]
     pub fn fields(&self) -> &C::Fields<'static> {
@@ -120,7 +132,6 @@ where
         Self {
             ingredient_index,
             key_map: Default::default(),
-            reset_at: Revision::start(),
         }
     }
 
@@ -177,12 +188,8 @@ where
         // so instead we go with this and transmute the lifetime in the `eq` closure
         C::Fields<'db>: HashEqLike<Key>,
     {
-        let zalsa_local = db.zalsa_local();
-        zalsa_local.report_tracked_read_simple(
-            InputDependencyIndex::for_table(self.ingredient_index),
-            Durability::MAX,
-            self.reset_at,
-        );
+        let (zalsa, zalsa_local) = db.zalsas();
+        let current_revision = zalsa.current_revision();
 
         // Optimization to only get read lock on the map if the data has already been interned.
         let data_hash = self.key_map.hasher().hash_one(&key);
@@ -198,7 +205,37 @@ where
             let lock = shard.read();
             if let Some(bucket) = lock.find(data_hash, eq) {
                 // SAFETY: Read lock on map is held during this block
-                return unsafe { *bucket.as_ref().1.get() };
+                let id = unsafe { *bucket.as_ref().1.get() };
+
+                let value = zalsa.table().get::<Value<C>>(id);
+
+                // Sync the value's revision.
+                if value.last_interned_at.load() < current_revision {
+                    value.last_interned_at.store(current_revision);
+                    db.salsa_event(&|| {
+                        Event::new(EventKind::DidReinternValue {
+                            id,
+                            revision: current_revision,
+                        })
+                    });
+                }
+
+                let durability = if let Some((_, stamp)) = zalsa_local.active_query() {
+                    // Record the maximum durability across all queries that intern this value.
+                    let previous_durability = value
+                        .durability
+                        .fetch_max(stamp.durability.as_u8(), Ordering::AcqRel);
+
+                    Durability::from_u8(previous_durability).max(stamp.durability)
+                } else {
+                    value.durability()
+                };
+
+                // Record a dependency on this value.
+                let index = self.database_key_index(id);
+                zalsa_local.report_tracked_read_simple(index, durability, value.first_interned_at);
+
+                return id;
             }
         }
 
@@ -207,42 +244,106 @@ where
                 self.key_map.hasher().hash_one(element)
             }) {
                 // Data has been interned by a racing call, use that ID instead
-                Ok(slot) => unsafe { *slot.as_ref().1.get() },
+                Ok(slot) => {
+                    let id = unsafe { *slot.as_ref().1.get() };
+                    let value = zalsa.table().get::<Value<C>>(id);
+
+                    // Sync the value's revision.
+                    if value.last_interned_at.load() < current_revision {
+                        value.last_interned_at.store(current_revision);
+                        db.salsa_event(&|| {
+                            Event::new(EventKind::DidReinternValue {
+                                id,
+                                revision: current_revision,
+                            })
+                        });
+                    }
+
+                    let durability = if let Some((_, stamp)) = zalsa_local.active_query() {
+                        // Record the maximum durability across all queries that intern this value.
+                        let previous_durability = value
+                            .durability
+                            .fetch_max(stamp.durability.as_u8(), Ordering::AcqRel);
+
+                        Durability::from_u8(previous_durability).max(stamp.durability)
+                    } else {
+                        value.durability()
+                    };
+
+                    // Record a dependency on this value.
+                    let index = self.database_key_index(id);
+                    zalsa_local.report_tracked_read_simple(index, durability, value.first_interned_at);
+
+                    id
+                }
 
                 // We won any races so should intern the data
                 Err(slot) => {
-                    let zalsa = db.zalsa();
                     let table = zalsa.table();
 
+                    // Record the durability of the current query on the interned value.
+                    let durability = zalsa_local
+                        .active_query()
+                        .map(|(_, stamp)| stamp.durability)
+                        // If there is no active query this durability does not actually matter.
+                        .unwrap_or(Durability::MAX);
+
                     let id = zalsa_local.allocate(table, self.ingredient_index, |id| Value::<C> {
                         fields: unsafe { self.to_internal_data(assemble(id, key)) },
                         memos: Default::default(),
                         syncs: Default::default(),
+                        durability: AtomicU8::new(durability.as_u8()),
+                        // Record the revision we are interning in.
+                        first_interned_at: current_revision,
+                        last_interned_at: AtomicRevision::from(current_revision),
                     });
-                    unsafe {
-                        lock.insert_in_slot(
-                            data_hash,
-                            slot,
-                            (
-                                table.get::<Value<C>>(id).fields.clone(),
-                                SharedValue::new(id),
-                            ),
-                        )
-                    };
+
+                    let value = table.get::<Value<C>>(id);
+
+                    let slot_value = (value.fields.clone(), SharedValue::new(id));
+                    unsafe { lock.insert_in_slot(data_hash, slot, slot_value) };
 
                     debug_assert_eq!(
                         data_hash,
                         self.key_map
                             .hasher()
                             .hash_one(table.get::<Value<C>>(id).fields.clone())
                     );
 
+                    // Record a dependency on this value.
+                    let index = self.database_key_index(id);
+                    zalsa_local.report_tracked_read_simple(index, durability, value.first_interned_at);
+
+                    db.salsa_event(&|| {
+                        Event::new(EventKind::DidInternValue {
+                            id,
+                            revision: current_revision,
+                        })
+                    });
+
                     id
                 }
             }
         }
 
+    /// Returns the database key index for an interned value with the given id.
+    pub fn database_key_index(&self, id: Id) -> DatabaseKeyIndex {
+        DatabaseKeyIndex::new(self.ingredient_index, id)
+    }
+
     /// Lookup the data for an interned value based on its id.
     /// Rarely used since end-users generally carry a struct with a pointer directly
     /// to the interned item.
     pub fn data<'db>(&'db self, db: &'db dyn Database, id: Id) -> &'db C::Fields<'db> {
-        let internal_data = db.zalsa().table().get::<Value<C>>(id);
+        let zalsa = db.zalsa();
+        let internal_data = zalsa.table().get::<Value<C>>(id);
+        let last_changed_revision = zalsa.last_changed_revision(internal_data.durability());
+
+        assert!(
+            internal_data.last_interned_at.load() >= last_changed_revision,
+            "Data was not interned in the latest revision for its durability."
+        );
+
         unsafe { Self::from_internal_data(&internal_data.fields) }
     }
@@ -252,6 +353,11 @@ where
         self.data(db, C::deref_struct(s))
     }
 
+    pub fn reset(&mut self, db: &mut dyn Database) {
+        db.zalsa_mut().new_revision();
+        self.key_map.clear();
+    }
+
     #[cfg(feature = "salsa_unstable")]
     /// Returns all data corresponding to the interned struct.
     pub fn entries<'db>(
@@ -265,12 +371,6 @@ where
             .filter_map(|(_, page)| page.cast_type::<crate::table::Page<Value<C>>>())
             .flat_map(|page| page.slots())
     }
-
-    pub fn reset(&mut self, revision: Revision) {
-        assert!(revision > self.reset_at);
-        self.reset_at = revision;
-        self.key_map.clear();
-    }
 }
 
 impl<C> Ingredient for IngredientImpl<C>
@@ -283,11 +383,28 @@ where
 
     unsafe fn maybe_changed_after(
         &self,
-        _db: &dyn Database,
-        _input: Id,
+        db: &dyn Database,
+        input: Id,
         revision: Revision,
     ) -> VerifyResult {
-        VerifyResult::changed_if(revision < self.reset_at)
+        let value = db.zalsa().table().get::<Value<C>>(input);
+        if value.first_interned_at > revision {
+            // The slot was reused.
+            return VerifyResult::Changed;
+        }
+
+        // The slot is valid in this revision but we have to sync the value's revision.
+        let current_revision = db.zalsa().current_revision();
+        value.last_interned_at.store(current_revision);
+
+        db.salsa_event(&|| {
+            Event::new(EventKind::DidReinternValue {
+                id: input,
+                revision: current_revision,
+            })
+        });
+
+        VerifyResult::unchanged()
     }
 
     fn is_provisional_cycle_head<'db>(&'db self, _db: &'db dyn Database, _input: Id) -> bool {
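The verification logic above boils down to two timestamps per interned slot: the revision a value was first interned in and the last revision in which it was seen. A toy model of that check, outside salsa, with hypothetical plain structs standing in for the real ingredient types:

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Revision(u32);

// Stand-in for an interned slot's bookkeeping fields.
struct Slot {
    first_interned_at: Revision,
    last_interned_at: Revision,
}

#[derive(Debug, PartialEq)]
enum VerifyResult {
    Changed,
    Unchanged,
}

// Mirrors the shape of `maybe_changed_after`: a slot that was (re)allocated after
// the revision we last verified against must be treated as changed; otherwise we
// refresh `last_interned_at` and report it unchanged.
fn maybe_changed_after(slot: &mut Slot, last_verified_at: Revision, current: Revision) -> VerifyResult {
    if slot.first_interned_at > last_verified_at {
        return VerifyResult::Changed; // the slot was reused for a different value
    }
    slot.last_interned_at = slot.last_interned_at.max(current);
    VerifyResult::Unchanged
}

fn main() {
    let mut slot = Slot { first_interned_at: Revision(1), last_interned_at: Revision(1) };
    assert_eq!(maybe_changed_after(&mut slot, Revision(1), Revision(2)), VerifyResult::Unchanged);
    assert_eq!(slot.last_interned_at, Revision(2));

    // A slot re-interned (reused) in revision 3 looks changed to a query last verified in revision 2.
    slot.first_interned_at = Revision(3);
    assert_eq!(maybe_changed_after(&mut slot, Revision(2), Revision(3)), VerifyResult::Changed);
}
```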
@@ -334,7 +451,7 @@ where
         false
     }
 
-    fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         fmt_index(C::DEBUG_NAME, index, fmt)
     }
 
src/key.rs (170 changed lines)
@@ -6,31 +6,44 @@ use crate::{
     Database, Id,
 };
 
 // ANCHOR: DatabaseKeyIndex
 /// An integer that uniquely identifies a particular query instance within the
-/// database. Used to track output dependencies between queries. Fully ordered and
-/// equatable but those orderings are arbitrary, and meant to be used only for
-/// inserting into maps and the like.
+/// database. Used to track input and output dependencies between queries. Fully
+/// ordered and equatable but those orderings are arbitrary, and meant to be used
+/// only for inserting into maps and the like.
 #[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct OutputDependencyIndex {
+pub struct DatabaseKeyIndex {
     ingredient_index: IngredientIndex,
     key_index: Id,
 }
 // ANCHOR_END: DatabaseKeyIndex
 
-/// An integer that uniquely identifies a particular query instance within the
-/// database. Used to track input dependencies between queries. Fully ordered and
-/// equatable but those orderings are arbitrary, and meant to be used only for
-/// inserting into maps and the like.
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct InputDependencyIndex {
-    ingredient_index: IngredientIndex,
-    key_index: Option<Id>,
-}
-
-impl OutputDependencyIndex {
+impl DatabaseKeyIndex {
     pub(crate) fn new(ingredient_index: IngredientIndex, key_index: Id) -> Self {
         Self {
-            ingredient_index,
             key_index,
+            ingredient_index,
         }
     }
+
+    pub fn ingredient_index(self) -> IngredientIndex {
+        self.ingredient_index
+    }
+
+    pub fn key_index(self) -> Id {
+        self.key_index
+    }
+
+    pub(crate) fn maybe_changed_after(
+        &self,
+        db: &dyn Database,
+        last_verified_at: crate::Revision,
+    ) -> VerifyResult {
+        // SAFETY: The `db` belongs to the ingredient
+        unsafe {
+            db.zalsa()
+                .lookup_ingredient(self.ingredient_index)
+                .maybe_changed_after(db, self.key_index, last_verified_at)
+        }
+    }
 
@@ -58,105 +71,12 @@ impl OutputDependencyIndex {
     }
 }
 
-impl fmt::Debug for OutputDependencyIndex {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        crate::attach::with_attached_database(|db| {
-            let ingredient = db.zalsa().lookup_ingredient(self.ingredient_index);
-            ingredient.fmt_index(Some(self.key_index), f)
-        })
-        .unwrap_or_else(|| {
-            f.debug_tuple("OutputDependencyIndex")
-                .field(&self.ingredient_index)
-                .field(&self.key_index)
-                .finish()
-        })
-    }
-}
-
-impl InputDependencyIndex {
-    /// Create a database-key-index for an interning or entity table.
-    /// The `key_index` here is always `None`, which deliberately corresponds to
-    /// no particular id or entry. This is because the data in such tables
-    /// remains valid until the table as a whole is reset. Using a single id avoids
-    /// creating tons of dependencies in the dependency listings.
-    pub(crate) fn for_table(ingredient_index: IngredientIndex) -> Self {
-        Self {
-            ingredient_index,
-            key_index: None,
-        }
-    }
-
-    pub(crate) fn new(ingredient_index: IngredientIndex, key_index: Id) -> Self {
-        Self {
-            ingredient_index,
-            key_index: Some(key_index),
-        }
-    }
-
-    pub(crate) fn maybe_changed_after(
-        &self,
-        db: &dyn Database,
-        last_verified_at: crate::Revision,
-    ) -> VerifyResult {
-        match self.key_index {
-            // SAFETY: The `db` belongs to the ingredient
-            Some(key_index) => unsafe {
-                db.zalsa()
-                    .lookup_ingredient(self.ingredient_index)
-                    .maybe_changed_after(db, key_index, last_verified_at)
-            },
-            // Data in tables themselves remain valid until the table as a whole is reset.
-            None => VerifyResult::unchanged(),
-        }
-    }
-
-    pub fn set_key_index(&mut self, key_index: Id) {
-        self.key_index = Some(key_index);
-    }
-}
-
-impl fmt::Debug for InputDependencyIndex {
+impl fmt::Debug for DatabaseKeyIndex {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         crate::attach::with_attached_database(|db| {
             let ingredient = db.zalsa().lookup_ingredient(self.ingredient_index);
             ingredient.fmt_index(self.key_index, f)
         })
         .unwrap_or_else(|| {
-            f.debug_tuple("InputDependencyIndex")
-                .field(&self.ingredient_index)
-                .field(&self.key_index)
-                .finish()
-        })
-    }
-}
-
-// ANCHOR: DatabaseKeyIndex
-/// An "active" database key index represents a database key index
-/// that is actively executing. In that case, the `key_index` cannot be
-/// None.
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct DatabaseKeyIndex {
-    pub(crate) ingredient_index: IngredientIndex,
-    pub(crate) key_index: Id,
-}
-// ANCHOR_END: DatabaseKeyIndex
-
-impl DatabaseKeyIndex {
-    pub fn ingredient_index(self) -> IngredientIndex {
-        self.ingredient_index
-    }
-
-    pub fn key_index(self) -> Id {
-        self.key_index
-    }
-}
-
-impl std::fmt::Debug for DatabaseKeyIndex {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        crate::attach::with_attached_database(|db| {
-            let ingredient = db.zalsa().lookup_ingredient(self.ingredient_index);
-            ingredient.fmt_index(Some(self.key_index), f)
-        })
-        .unwrap_or_else(|| {
             f.debug_tuple("DatabaseKeyIndex")
                 .field(&self.ingredient_index)
@@ -165,33 +85,3 @@ impl std::fmt::Debug for DatabaseKeyIndex {
                 .field(&self.key_index)
                 .finish()
         })
     }
 }
-
-impl From<DatabaseKeyIndex> for InputDependencyIndex {
-    fn from(value: DatabaseKeyIndex) -> Self {
-        Self {
-            ingredient_index: value.ingredient_index,
-            key_index: Some(value.key_index),
-        }
-    }
-}
-
-impl From<DatabaseKeyIndex> for OutputDependencyIndex {
-    fn from(value: DatabaseKeyIndex) -> Self {
-        Self {
-            ingredient_index: value.ingredient_index,
-            key_index: value.key_index,
-        }
-    }
-}
-
-impl TryFrom<InputDependencyIndex> for DatabaseKeyIndex {
-    type Error = ();
-
-    fn try_from(value: InputDependencyIndex) -> Result<Self, Self::Error> {
-        let key_index = value.key_index.ok_or(())?;
-        Ok(Self {
-            ingredient_index: value.ingredient_index,
-            key_index,
-        })
-    }
-}
@@ -6,7 +6,7 @@ use tracked_field::FieldIngredientImpl;
 use crate::{
     function::VerifyResult,
     ingredient::{fmt_index, Ingredient, Jar},
-    key::{DatabaseKeyIndex, InputDependencyIndex},
+    key::DatabaseKeyIndex,
     plumbing::ZalsaLocal,
     revision::OptionalAtomicRevision,
     runtime::StampedValue,
@@ -259,10 +259,10 @@ pub struct Value<C>
 where
     C: Configuration,
 {
-    /// The durability minimum durability of all inputs consumed
-    /// by the creator query prior to creating this tracked struct.
-    /// If any of those inputs changes, then the creator query may
-    /// create this struct with different values.
+    /// The minimum durability of all inputs consumed by the creator
+    /// query prior to creating this tracked struct. If any of those
+    /// inputs changes, then the creator query may create this struct
+    /// with different values.
     durability: Durability,
 
     /// The revision in which the tracked struct was first created.
@@ -373,10 +373,7 @@ where
 
     /// Returns the database key index for a tracked struct with the given id.
     pub fn database_key_index(&self, id: Id) -> DatabaseKeyIndex {
-        DatabaseKeyIndex {
-            ingredient_index: self.ingredient_index,
-            key_index: id,
-        }
+        DatabaseKeyIndex::new(self.ingredient_index, id)
     }
 
     pub fn new_struct<'db>(
@@ -403,7 +400,7 @@ where
         match zalsa_local.tracked_struct_id(&identity) {
             Some(id) => {
                 // The struct already exists in the intern map.
-                zalsa_local.add_output(self.database_key_index(id).into());
+                zalsa_local.add_output(self.database_key_index(id));
                 self.update(zalsa, current_revision, id, &current_deps, fields);
                 C::struct_from_id(id)
             }
@@ -412,7 +409,7 @@ where
                 // This is a new tracked struct, so create an entry in the struct map.
                 let id = self.allocate(zalsa, zalsa_local, current_revision, &current_deps, fields);
                 let key = self.database_key_index(id);
-                zalsa_local.add_output(key.into());
+                zalsa_local.add_output(key);
                 zalsa_local.store_tracked_struct_id(identity, id);
                 C::struct_from_id(id)
             }
@@ -623,10 +620,7 @@ where
         let ingredient_index =
             zalsa.ingredient_index_for_memo(self.ingredient_index, memo_ingredient_index);
 
-        let executor = DatabaseKeyIndex {
-            ingredient_index,
-            key_index: id,
-        };
+        let executor = DatabaseKeyIndex::new(ingredient_index, id);
 
         db.salsa_event(&|| Event::new(EventKind::DidDiscard { key: executor }));
 
@@ -676,7 +670,7 @@ where
         let field_changed_at = data.revisions[relative_tracked_index];
 
         zalsa_local.report_tracked_read_simple(
-            InputDependencyIndex::new(field_ingredient_index, id),
+            DatabaseKeyIndex::new(field_ingredient_index, id),
             data.durability,
             field_changed_at,
         );
@@ -701,7 +695,7 @@ where
 
         // Add a dependency on the tracked struct itself.
         zalsa_local.report_tracked_read_simple(
-            InputDependencyIndex::new(self.ingredient_index, id),
+            DatabaseKeyIndex::new(self.ingredient_index, id),
             data.durability,
             data.created_at,
         );
@@ -781,7 +775,7 @@ where
         self.delete_entity(db.as_dyn_database(), stale_output_key, provisional);
     }
 
-    fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
         fmt_index(C::DEBUG_NAME, index, fmt)
     }
 
@@ -94,17 +94,13 @@ where
         panic!("tracked field ingredients have no outputs")
     }
 
-    fn fmt_index(
-        &self,
-        index: Option<crate::Id>,
-        fmt: &mut std::fmt::Formatter<'_>,
-    ) -> std::fmt::Result {
+    fn fmt_index(&self, index: crate::Id, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(
             fmt,
             "{}.{}({:?})",
             C::DEBUG_NAME,
             C::FIELD_DEBUG_NAMES[self.field_index],
-            index.unwrap()
+            index
         )
     }
 
@@ -7,7 +7,7 @@ use crate::accumulator::accumulated_map::{
 use crate::active_query::ActiveQuery;
 use crate::cycle::CycleHeads;
 use crate::durability::Durability;
-use crate::key::{DatabaseKeyIndex, InputDependencyIndex, OutputDependencyIndex};
+use crate::key::DatabaseKeyIndex;
 use crate::runtime::StampedValue;
 use crate::table::PageIndex;
 use crate::table::Slot;
@@ -144,7 +144,7 @@ impl ZalsaLocal {
     }
 
     /// Add an output to the current query's list of dependencies
-    pub(crate) fn add_output(&self, entity: OutputDependencyIndex) {
+    pub(crate) fn add_output(&self, entity: DatabaseKeyIndex) {
         self.with_query_stack(|stack| {
             if let Some(top_query) = stack.last_mut() {
                 top_query.add_output(entity)
@@ -153,7 +153,7 @@ impl ZalsaLocal {
     }
 
     /// Check whether `entity` is an output of the currently active query (if any)
-    pub(crate) fn is_output_of_active_query(&self, entity: OutputDependencyIndex) -> bool {
+    pub(crate) fn is_output_of_active_query(&self, entity: DatabaseKeyIndex) -> bool {
         self.with_query_stack(|stack| {
             if let Some(top_query) = stack.last_mut() {
                 top_query.is_output(entity)
@@ -166,7 +166,7 @@ impl ZalsaLocal {
     /// Register that currently active query reads the given input
     pub(crate) fn report_tracked_read(
         &self,
-        input: InputDependencyIndex,
+        input: DatabaseKeyIndex,
         durability: Durability,
         changed_at: Revision,
         accumulated: InputAccumulatedValues,
@@ -186,7 +186,7 @@ impl ZalsaLocal {
     /// Register that currently active query reads the given input
    pub(crate) fn report_tracked_read_simple(
        &self,
-        input: InputDependencyIndex,
+        input: DatabaseKeyIndex,
        durability: Durability,
        changed_at: Revision,
    ) {
@@ -405,7 +405,7 @@ pub enum QueryOrigin {
 
 impl QueryOrigin {
     /// Indices for queries *read* by this query
-    pub(crate) fn inputs(&self) -> impl DoubleEndedIterator<Item = InputDependencyIndex> + '_ {
+    pub(crate) fn inputs(&self) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + '_ {
         let opt_edges = match self {
             QueryOrigin::Derived(edges) | QueryOrigin::DerivedUntracked(edges) => Some(edges),
             QueryOrigin::Assigned(_) | QueryOrigin::FixpointInitial => None,
@@ -414,7 +414,7 @@ impl QueryOrigin {
     }
 
     /// Indices for queries *written* by this query (if any)
-    pub(crate) fn outputs(&self) -> impl DoubleEndedIterator<Item = OutputDependencyIndex> + '_ {
+    pub(crate) fn outputs(&self) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + '_ {
         let opt_edges = match self {
             QueryOrigin::Derived(edges) | QueryOrigin::DerivedUntracked(edges) => Some(edges),
             QueryOrigin::Assigned(_) | QueryOrigin::FixpointInitial => None,
@@ -448,15 +448,15 @@ pub struct QueryEdges {
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
 pub enum QueryEdge {
-    Input(InputDependencyIndex),
-    Output(OutputDependencyIndex),
+    Input(DatabaseKeyIndex),
+    Output(DatabaseKeyIndex),
 }
 
 impl QueryEdges {
     /// Returns the (tracked) inputs that were executed in computing this memoized value.
     ///
     /// These will always be in execution order.
-    pub(crate) fn inputs(&self) -> impl DoubleEndedIterator<Item = InputDependencyIndex> + '_ {
+    pub(crate) fn inputs(&self) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + '_ {
         self.input_outputs.iter().filter_map(|&edge| match edge {
             QueryEdge::Input(dependency_index) => Some(dependency_index),
             QueryEdge::Output(_) => None,
@@ -466,7 +466,7 @@ impl QueryEdges {
     /// Returns the (tracked) outputs that were executed in computing this memoized value.
     ///
     /// These will always be in execution order.
-    pub(crate) fn outputs(&self) -> impl DoubleEndedIterator<Item = OutputDependencyIndex> + '_ {
+    pub(crate) fn outputs(&self) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + '_ {
         self.input_outputs.iter().filter_map(|&edge| match edge {
             QueryEdge::Output(dependency_index) => Some(dependency_index),
             QueryEdge::Input(_) => None,
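With inputs and outputs both stored as plain `DatabaseKeyIndex` values, a single ordered edge list can be split by direction with `filter_map`, as `QueryEdges::inputs`/`outputs` do above. A standalone sketch of the same pattern, with a hypothetical stand-in key type:

```rust
// Hypothetical stand-in for salsa's DatabaseKeyIndex.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Key(u32);

#[derive(Clone, Copy, Debug)]
enum QueryEdge {
    Input(Key),
    Output(Key),
}

// Inputs and outputs live in one execution-ordered list; direction is recovered by filtering.
fn inputs(edges: &[QueryEdge]) -> impl DoubleEndedIterator<Item = Key> + '_ {
    edges.iter().filter_map(|&edge| match edge {
        QueryEdge::Input(key) => Some(key),
        QueryEdge::Output(_) => None,
    })
}

fn main() {
    let edges = [QueryEdge::Input(Key(1)), QueryEdge::Output(Key(2)), QueryEdge::Input(Key(3))];
    let read: Vec<_> = inputs(&edges).collect();
    assert_eq!(read, [Key(1), Key(3)]);
}
```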
@@ -78,7 +78,7 @@ pub struct EventLoggerDatabase {
 #[salsa::db]
 impl Database for EventLoggerDatabase {
     fn salsa_event(&self, event: &dyn Fn() -> salsa::Event) {
-        self.push_log(format!("{:?}", event()));
+        self.push_log(format!("{:?}", event().kind));
     }
 }
 
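Logging `event().kind` instead of the full `Event` drops the `thread_id` field, which is what makes the expected logs in the tests below stable across runs and machines (as the commit message notes). A sketch of the difference, with a toy event type standing in for `salsa::Event`:

```rust
use std::thread::ThreadId;

// Toy stand-ins for salsa::Event / salsa::EventKind, only to show what the
// Debug output of each logging choice looks like.
#[derive(Debug)]
enum EventKind {
    WillCheckCancellation,
}

#[derive(Debug)]
struct Event {
    thread_id: ThreadId,
    kind: EventKind,
}

fn main() {
    let event = Event {
        thread_id: std::thread::current().id(),
        kind: EventKind::WillCheckCancellation,
    };

    // Non-deterministic across runs: includes the thread id.
    println!("{:?}", event); // e.g. Event { thread_id: ThreadId(2), kind: WillCheckCancellation }

    // Deterministic: only the kind, which is what the tests snapshot.
    println!("{:?}", event.kind); // WillCheckCancellation
}
```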
tests/interned-revisions.rs (new file, 116 lines)
@@ -0,0 +1,116 @@
//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.

mod common;
use common::LogDatabase;
use expect_test::expect;
use salsa::{Database, Setter};
use test_log::test;

#[salsa::input]
struct Input {
    field1: usize,
}

#[salsa::interned]
struct Interned<'db> {
    field1: usize,
}

#[test]
fn test_intern_new() {
    #[salsa::tracked]
    fn function<'db>(db: &'db dyn Database, input: Input) -> Interned<'db> {
        Interned::new(db, input.field1(db))
    }

    let mut db = common::EventLoggerDatabase::default();
    let input = Input::new(&db, 0);

    let result_in_rev_1 = function(&db, input);
    assert_eq!(result_in_rev_1.field1(&db), 0);

    // Modify the input to force a new value to be created.
    input.set_field1(&mut db).to(1);

    let result_in_rev_2 = function(&db, input);
    assert_eq!(result_in_rev_2.field1(&db), 1);

    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { id: Id(400), revision: R1 }",
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { id: Id(401), revision: R2 }",
        ]"#]]);
}

#[test]
fn test_reintern() {
    #[salsa::tracked]
    fn function(db: &dyn Database, input: Input) -> Interned<'_> {
        let _ = input.field1(db);
        Interned::new(db, 0)
    }

    let mut db = common::EventLoggerDatabase::default();

    let input = Input::new(&db, 0);
    let result_in_rev_1 = function(&db, input);
    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { id: Id(400), revision: R1 }",
        ]"#]]);

    assert_eq!(result_in_rev_1.field1(&db), 0);

    // Modify the input to force the value to be re-interned.
    input.set_field1(&mut db).to(1);

    let result_in_rev_2 = function(&db, input);
    db.assert_logs(expect![[r#"
        [
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidReinternValue { id: Id(400), revision: R2 }",
        ]"#]]);

    assert_eq!(result_in_rev_2.field1(&db), 0);
}

#[test]
fn test_durability() {
    #[salsa::tracked]
    fn function<'db>(db: &'db dyn Database, _input: Input) -> Interned<'db> {
        Interned::new(db, 0)
    }

    let mut db = common::EventLoggerDatabase::default();
    let input = Input::new(&db, 0);

    let result_in_rev_1 = function(&db, input);
    assert_eq!(result_in_rev_1.field1(&db), 0);

    // Modify the input to bump the revision without re-interning the value, as there
    // is no read dependency.
    input.set_field1(&mut db).to(1);

    let result_in_rev_2 = function(&db, input);
    assert_eq!(result_in_rev_2.field1(&db), 0);

    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { id: Id(400), revision: R1 }",
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "DidValidateMemoizedValue { database_key: function(Id(0)) }",
        ]"#]]);
}
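The tests above are snapshot tests built on `expect_test`: the strings inside `expect![[...]]` are recorded output, and `assert_logs` compares the collected event log against them. A minimal hedged example of the same mechanism outside salsa (running with `UPDATE_EXPECT=1` rewrites the snapshot in place; `render_events` here is a hypothetical helper, not part of salsa):

```rust
// Sketch of an expect-test snapshot assertion, independent of salsa.
// Requires `expect-test` as a dev-dependency.
use expect_test::expect;

fn render_events(events: &[&str]) -> String {
    format!("{events:#?}")
}

#[test]
fn snapshot_example() {
    let actual = render_events(&["WillCheckCancellation", "DidInternValue { id: Id(400), revision: R1 }"]);
    // On mismatch the test fails with a diff; UPDATE_EXPECT=1 cargo test
    // rewrites the expected block below to match the new output.
    expect![[r#"
        [
            "WillCheckCancellation",
            "DidInternValue { id: Id(400), revision: R1 }",
        ]"#]]
    .assert_eq(&actual);
}
```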
@@ -62,10 +62,11 @@ fn test_leaked_inputs_ignored() {
     let result_in_rev_1 = function(&db, input);
     db.assert_logs(expect![[r#"
         [
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: function(Id(0)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: counter_field(Id(800)) } }",
+            "WillCheckCancellation",
+            "WillExecute { database_key: function(Id(0)) }",
+            "DidInternValue { id: Id(800), revision: R1 }",
+            "WillCheckCancellation",
+            "WillExecute { database_key: counter_field(Id(800)) }",
         ]"#]]);
 
     assert_eq!(result_in_rev_1, (0, 0));
@@ -80,12 +81,13 @@ fn test_leaked_inputs_ignored() {
     let result_in_rev_2 = function(&db, input);
     db.assert_logs(expect![[r#"
         [
-            "Event { thread_id: ThreadId(2), kind: DidSetCancellationFlag }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: counter_field(Id(800)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: function(Id(0)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
+            "DidSetCancellationFlag",
+            "WillCheckCancellation",
+            "DidReinternValue { id: Id(800), revision: R2 }",
+            "WillCheckCancellation",
+            "WillExecute { database_key: counter_field(Id(800)) }",
+            "WillExecute { database_key: function(Id(0)) }",
+            "WillCheckCancellation",
         ]"#]]);
 
     // Salsa will re-execute `counter_field` before re-executing
@@ -96,6 +98,6 @@ fn test_leaked_inputs_ignored() {
     // value of 100 since the struct has already been read during
     // this revision.
     //
-    // Contrast with preverify-struct-with-leaked-data-2.rs.
+    // Contrast with preverify-struct-with-leaked-data.rs.
     assert_eq!(result_in_rev_2, (0, 0));
 }
@@ -59,10 +59,10 @@ fn test_leaked_inputs_ignored() {
     let result_in_rev_1 = function(&db, input);
     db.assert_logs(expect![[r#"
         [
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: function(Id(0)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: counter_field(Id(400)) } }",
+            "WillCheckCancellation",
+            "WillExecute { database_key: function(Id(0)) }",
+            "WillCheckCancellation",
+            "WillExecute { database_key: counter_field(Id(400)) }",
         ]"#]]);
 
     assert_eq!(result_in_rev_1, (0, 0));
@@ -77,12 +77,12 @@ fn test_leaked_inputs_ignored() {
     let result_in_rev_2 = function(&db, input);
     db.assert_logs(expect![[r#"
         [
-            "Event { thread_id: ThreadId(2), kind: DidSetCancellationFlag }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: DidValidateMemoizedValue { database_key: counter_field(Id(400)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: function(Id(0)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
+            "DidSetCancellationFlag",
+            "WillCheckCancellation",
+            "WillCheckCancellation",
+            "DidValidateMemoizedValue { database_key: counter_field(Id(400)) }",
+            "WillExecute { database_key: function(Id(0)) }",
+            "WillCheckCancellation",
         ]"#]]);
 
     // Because salsa does not see any way for the tracked
@@ -28,10 +28,10 @@ fn execute() {
 
     db.assert_logs(expect![[r#"
         [
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: tracked_fn(Id(0)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: WillExecute { database_key: tracked_fn(Id(1)) } }",
+            "WillCheckCancellation",
+            "WillExecute { database_key: tracked_fn(Id(0)) }",
+            "WillCheckCancellation",
+            "WillExecute { database_key: tracked_fn(Id(1)) }",
         ]"#]]);
 
     db.synthetic_write(Durability::LOW);
@@ -46,10 +46,10 @@ fn execute() {
     // executed the query.
     db.assert_logs(expect![[r#"
         [
-            "Event { thread_id: ThreadId(2), kind: DidSetCancellationFlag }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: DidValidateMemoizedValue { database_key: tracked_fn(Id(0)) } }",
-            "Event { thread_id: ThreadId(2), kind: WillCheckCancellation }",
-            "Event { thread_id: ThreadId(2), kind: DidValidateMemoizedValue { database_key: tracked_fn(Id(1)) } }",
+            "DidSetCancellationFlag",
+            "WillCheckCancellation",
+            "DidValidateMemoizedValue { database_key: tracked_fn(Id(0)) }",
+            "WillCheckCancellation",
+            "DidValidateMemoizedValue { database_key: tracked_fn(Id(1)) }",
         ]"#]]);
 }