fix: multithreaded nested fixpoint iteration (#882)

* Set `verified_final` in `execute` after removing the last cycle head

* Add runaway query repro

* Add tracing

* Fix part 1

* Fix `cycle_head_kinds` to always return provisional for memos that aren't verified final (they should be validated by `validate_same_iteration` or wait for the cycle head)

* Fix cycle error

* Documentation

* Fix await for queries depending on initial value

* Correctly initialize `queued`

* Cleanup

* Short circuit if entire query runs on single thread

* Move parallel code into its own method

* Rename method, add self_key to queued

* Revert self-key changes

* Move check *after* `deep_verify_memo`

* Add a test for a cycle with changing cycle heads

* Short circuit more often

* Consider iteration in `validate_provisional`

* Only yield if all heads result in a cycle. Retry if even just one inner cycle made progress (in which case there's probably a new memo)

* Fix hangs

* Cargo fmt

* clippy

* Fix hang if cycle initial panics

* Rename `cycle_head_kind`, enable `cycle_a_t1_b_t2_fallback` shuttle test

* Cleanup

* Docs
Micha Reiser 2025-06-01 10:45:37 +02:00 committed by GitHub
parent 80fb79e910
commit 2b5188778e
25 changed files with 877 additions and 223 deletions
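
For orientation, this is roughly the shape of cycle the fix targets, condensed from the new `cycle_nested_deep` test added in this commit (a sketch only: `CycleValue`, `MIN`/`MAX`, and the `KnobsDatabase` test harness are the test's own types, and the thread setup is elided):

```rust
use salsa::CycleRecoveryAction;

// Inner cycle: `b` -> `c` -> `b`; outer cycle: `c` also calls back into `a`.
// One thread drives the fixpoint starting from `a` while another thread enters
// through `b`, so both observe each other's provisional memos.
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_a(db: &dyn KnobsDatabase) -> CycleValue {
    query_b(db)
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_b(db: &dyn KnobsDatabase) -> CycleValue {
    CycleValue(query_c(db).0 + 1).min(MAX)
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_c(db: &dyn KnobsDatabase) -> CycleValue {
    let b = query_b(db);
    let a = query_a(db);
    CycleValue(b.0.max(a.0))
}

fn cycle_fn(
    _db: &dyn KnobsDatabase,
    _value: &CycleValue,
    _count: u32,
) -> CycleRecoveryAction<CycleValue> {
    CycleRecoveryAction::Iterate
}

fn initial(_db: &dyn KnobsDatabase) -> CycleValue {
    MIN
}
```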


@ -3,7 +3,7 @@ use std::{fmt, mem, ops};
use crate::accumulator::accumulated_map::{
AccumulatedMap, AtomicInputAccumulatedValues, InputAccumulatedValues,
};
use crate::cycle::CycleHeads;
use crate::cycle::{CycleHeads, IterationCount};
use crate::durability::Durability;
use crate::hash::FxIndexSet;
use crate::key::DatabaseKeyIndex;
@ -61,7 +61,7 @@ pub(crate) struct ActiveQuery {
cycle_heads: CycleHeads,
/// If this query is a cycle head, iteration count of that cycle.
iteration_count: u32,
iteration_count: IterationCount,
}
impl ActiveQuery {
@ -147,7 +147,7 @@ impl ActiveQuery {
}
}
pub(super) fn iteration_count(&self) -> u32 {
pub(super) fn iteration_count(&self) -> IterationCount {
self.iteration_count
}
@ -161,7 +161,7 @@ impl ActiveQuery {
}
impl ActiveQuery {
fn new(database_key_index: DatabaseKeyIndex, iteration_count: u32) -> Self {
fn new(database_key_index: DatabaseKeyIndex, iteration_count: IterationCount) -> Self {
ActiveQuery {
database_key_index,
durability: Durability::MAX,
@ -189,7 +189,7 @@ impl ActiveQuery {
ref mut accumulated,
accumulated_inputs,
ref mut cycle_heads,
iteration_count: _,
iteration_count,
} = self;
let origin = if untracked_read {
@ -204,6 +204,7 @@ impl ActiveQuery {
mem::take(accumulated),
mem::take(tracked_struct_ids),
mem::take(cycle_heads),
iteration_count,
);
let accumulated_inputs = AtomicInputAccumulatedValues::new(accumulated_inputs);
@ -236,10 +237,14 @@ impl ActiveQuery {
tracked_struct_ids.clear();
accumulated.clear();
*cycle_heads = Default::default();
*iteration_count = 0;
*iteration_count = IterationCount::initial();
}
fn reset_for(&mut self, new_database_key_index: DatabaseKeyIndex, new_iteration_count: u32) {
fn reset_for(
&mut self,
new_database_key_index: DatabaseKeyIndex,
new_iteration_count: IterationCount,
) {
let Self {
database_key_index,
durability,
@ -323,7 +328,7 @@ impl QueryStack {
pub(crate) fn push_new_query(
&mut self,
database_key_index: DatabaseKeyIndex,
iteration_count: u32,
iteration_count: IterationCount,
) {
if self.len < self.stack.len() {
self.stack[self.len].reset_for(database_key_index, iteration_count);
@ -373,7 +378,7 @@ struct CapturedQuery {
durability: Durability,
changed_at: Revision,
cycle_heads: CycleHeads,
iteration_count: u32,
iteration_count: IterationCount,
}
impl fmt::Debug for CapturedQuery {
@ -449,7 +454,7 @@ impl fmt::Display for Backtrace {
write!(fmt, "{idx:>4}: {database_key_index:?}")?;
if full {
write!(fmt, " -> ({changed_at:?}, {durability:#?}")?;
if !cycle_heads.is_empty() || iteration_count > 0 {
if !cycle_heads.is_empty() || !iteration_count.is_initial() {
write!(fmt, ", iteration = {iteration_count:?}")?;
}
write!(fmt, ")")?;


@ -60,7 +60,7 @@ use crate::sync::OnceLock;
/// The maximum number of times we'll fixpoint-iterate before panicking.
///
/// Should only be relevant in case of a badly configured cycle recovery.
pub const MAX_ITERATIONS: u32 = 200;
pub const MAX_ITERATIONS: IterationCount = IterationCount(200);
pub struct UnexpectedCycle(Option<crate::Backtrace>);
@ -147,7 +147,33 @@ pub enum CycleRecoveryStrategy {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct CycleHead {
pub(crate) database_key_index: DatabaseKeyIndex,
pub(crate) iteration_count: u32,
pub(crate) iteration_count: IterationCount,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Default)]
pub struct IterationCount(u8);
impl IterationCount {
pub(crate) const fn initial() -> Self {
Self(0)
}
pub(crate) const fn is_initial(self) -> bool {
self.0 == 0
}
pub(crate) const fn increment(self) -> Option<Self> {
let next = Self(self.0 + 1);
if next.0 <= MAX_ITERATIONS.0 {
Some(next)
} else {
None
}
}
pub(crate) const fn as_u32(self) -> u32 {
self.0 as u32
}
}
/// Any provisional value generated by any query in a cycle will track the cycle head(s) (can be
@ -164,7 +190,7 @@ impl CycleHeads {
pub(crate) fn initial(database_key_index: DatabaseKeyIndex) -> Self {
Self(thin_vec![CycleHead {
database_key_index,
iteration_count: 0,
iteration_count: IterationCount::initial(),
}])
}
@ -190,7 +216,7 @@ impl CycleHeads {
pub(crate) fn update_iteration_count(
&mut self,
cycle_head_index: DatabaseKeyIndex,
new_iteration_count: u32,
new_iteration_count: IterationCount,
) {
if let Some(cycle_head) = self
.0
@ -208,11 +234,11 @@ impl CycleHeads {
.iter()
.find(|candidate| candidate.database_key_index == database_key_index)
{
assert_eq!(existing.iteration_count, 0);
assert_eq!(existing.iteration_count, IterationCount::initial());
} else {
self.0.push(CycleHead {
database_key_index,
iteration_count: 0,
iteration_count: IterationCount::initial(),
});
}
}
@ -266,8 +292,18 @@ pub(crate) fn empty_cycle_heads() -> &'static CycleHeads {
}
#[derive(Debug, PartialEq, Eq)]
pub enum CycleHeadKind {
Provisional,
NotProvisional,
pub enum ProvisionalStatus {
Provisional { iteration: IterationCount },
Final { iteration: IterationCount },
FallbackImmediate,
}
impl ProvisionalStatus {
pub(crate) const fn iteration(&self) -> Option<IterationCount> {
match self {
ProvisionalStatus::Provisional { iteration } => Some(*iteration),
ProvisionalStatus::Final { iteration } => Some(*iteration),
ProvisionalStatus::FallbackImmediate => None,
}
}
}
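
For reference, the iteration counter is now a bounded `u8` newtype and is bumped through the checked `increment` helper rather than raw `u32` arithmetic; a minimal sketch of the pattern used in `function/execute.rs` further down (the `database_key_index` binding stands for the query being iterated):

```rust
// Sketch of the checked iteration bump (mirrors the execute.rs hunk below).
let mut iteration_count = IterationCount::initial();

// ...each time the fixpoint has not yet converged:
iteration_count = iteration_count.increment().unwrap_or_else(|| {
    panic!("{database_key_index:?}: execute: too many cycle iterations")
});
```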


@ -1,3 +1,4 @@
use crate::cycle::IterationCount;
use crate::key::DatabaseKeyIndex;
use crate::sync::thread::{self, ThreadId};
use crate::Revision;
@ -61,7 +62,7 @@ pub enum EventKind {
WillIterateCycle {
/// The database-key for the cycle head. Implements `Debug`.
database_key: DatabaseKeyIndex,
iteration_count: u32,
iteration_count: IterationCount,
fell_back: bool,
},

View file

@ -1,14 +1,17 @@
pub(crate) use maybe_changed_after::VerifyResult;
use std::any::Any;
use std::fmt;
use std::ptr::NonNull;
pub(crate) use maybe_changed_after::VerifyResult;
use std::sync::atomic::Ordering;
pub(crate) use sync::SyncGuard;
use crate::accumulator::accumulated_map::{AccumulatedMap, InputAccumulatedValues};
use crate::cycle::{CycleHeadKind, CycleHeads, CycleRecoveryAction, CycleRecoveryStrategy};
use crate::cycle::{
empty_cycle_heads, CycleHeads, CycleRecoveryAction, CycleRecoveryStrategy, ProvisionalStatus,
};
use crate::function::delete::DeletedEntries;
use crate::function::sync::{ClaimResult, SyncTable};
use crate::ingredient::Ingredient;
use crate::ingredient::{Ingredient, WaitForResult};
use crate::key::DatabaseKeyIndex;
use crate::plumbing::MemoIngredientMap;
use crate::salsa_struct::SalsaStructInDb;
@ -244,30 +247,46 @@ where
self.maybe_changed_after(db, input, revision, cycle_heads)
}
/// True if the input `input` contains a memo that cites itself as a cycle head.
/// This indicates an intermediate value for a cycle that has not yet reached a fixed point.
fn cycle_head_kind(&self, zalsa: &Zalsa, input: Id) -> CycleHeadKind {
let is_provisional = self
.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input))
.is_some_and(|memo| {
memo.cycle_heads()
.into_iter()
.any(|head| head.database_key_index == self.database_key_index(input))
});
if is_provisional {
CycleHeadKind::Provisional
} else if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate {
CycleHeadKind::FallbackImmediate
/// Returns `final` only if the memo has the `verified_final` flag set and the cycle recovery strategy is not `FallbackImmediate`.
///
/// Otherwise, the value is still provisional. For both final and provisional, it also
/// returns the iteration in which this memo was created (always 0 except for cycle heads).
fn provisional_status(&self, zalsa: &Zalsa, input: Id) -> Option<ProvisionalStatus> {
let memo =
self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input))?;
let iteration = memo.revisions.iteration();
let verified_final = memo.revisions.verified_final.load(Ordering::Relaxed);
Some(if verified_final {
if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate {
ProvisionalStatus::FallbackImmediate
} else {
ProvisionalStatus::Final { iteration }
}
} else {
CycleHeadKind::NotProvisional
}
ProvisionalStatus::Provisional { iteration }
})
}
/// Attempts to claim `key_index`, returning `false` if a cycle occurs.
fn wait_for(&self, zalsa: &Zalsa, key_index: Id) -> bool {
fn cycle_heads<'db>(&self, zalsa: &'db Zalsa, input: Id) -> &'db CycleHeads {
self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input))
.map(|memo| memo.cycle_heads())
.unwrap_or(empty_cycle_heads())
}
/// Attempts to claim `key_index` without blocking.
///
/// * [`WaitForResult::Running`] if the `key_index` is running on another thread. It's up to the caller to block on the other thread
/// to wait until the result becomes available.
/// * [`WaitForResult::Available`] It is (or at least was) possible to claim the `key_index`.
/// * [`WaitForResult::Cycle`] Claiming the `key_index` results in a cycle because it's on the current thread's query stack or
/// running on another thread that is blocked on this thread.
fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> {
match self.sync_table.try_claim(zalsa, key_index) {
ClaimResult::Retry | ClaimResult::Claimed(_) => true,
ClaimResult::Cycle => false,
ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on),
ClaimResult::Cycle { same_thread } => WaitForResult::Cycle { same_thread },
ClaimResult::Claimed(_) => WaitForResult::Available,
}
}

View file

@ -96,8 +96,9 @@ where
db: &'db C::DbView,
key: Id,
) -> (Option<&'db AccumulatedMap>, InputAccumulatedValues) {
let (zalsa, zalsa_local) = db.zalsas();
// NEXT STEP: stash and refactor `fetch` to return an `&Memo` so we can make this work
let memo = self.refresh_memo(db, db.zalsa(), key);
let memo = self.refresh_memo(db, zalsa, zalsa_local, key);
(
memo.revisions.accumulated(),
memo.revisions.accumulated_inputs.load(),

View file

@ -1,4 +1,4 @@
use crate::cycle::{CycleRecoveryStrategy, MAX_ITERATIONS};
use crate::cycle::{CycleRecoveryStrategy, IterationCount};
use crate::function::memo::Memo;
use crate::function::{Configuration, IngredientImpl};
use crate::sync::atomic::{AtomicBool, Ordering};
@ -74,7 +74,9 @@ where
// Cycle participants that don't have a fallback will be discarded in
// `validate_provisional()`.
let cycle_heads = std::mem::take(cycle_heads);
let active_query = db.zalsa_local().push_query(database_key_index, 0);
let active_query = db
.zalsa_local()
.push_query(database_key_index, IterationCount::initial());
new_value = C::cycle_initial(db, C::id_to_input(db, id));
revisions = active_query.pop();
// We need to set `cycle_heads` and `verified_final` because it needs to propagate to the callers.
@ -125,7 +127,7 @@ where
memo_ingredient_index: MemoIngredientIndex,
) -> (C::Output<'db>, QueryRevisions) {
let database_key_index = active_query.database_key_index;
let mut iteration_count: u32 = 0;
let mut iteration_count = IterationCount::initial();
let mut fell_back = false;
// Our provisional value from the previous iteration, when doing fixpoint iteration.
@ -189,12 +191,10 @@ where
match C::recover_from_cycle(
db,
&new_value,
iteration_count,
iteration_count.as_u32(),
C::id_to_input(db, id),
) {
crate::CycleRecoveryAction::Iterate => {
tracing::debug!("{database_key_index:?}: execute: iterate again");
}
crate::CycleRecoveryAction::Iterate => {}
crate::CycleRecoveryAction::Fallback(fallback_value) => {
tracing::debug!(
"{database_key_index:?}: execute: user cycle_fn says to fall back"
@ -208,10 +208,9 @@ where
}
// `iteration_count` can't overflow as we check it against `MAX_ITERATIONS`
// which is less than `u32::MAX`.
iteration_count += 1;
if iteration_count > MAX_ITERATIONS {
panic!("{database_key_index:?}: execute: too many cycle iterations");
}
iteration_count = iteration_count.increment().unwrap_or_else(|| {
panic!("{database_key_index:?}: execute: too many cycle iterations")
});
zalsa.event(&|| {
Event::new(EventKind::WillIterateCycle {
database_key: database_key_index,
@ -220,6 +219,10 @@ where
})
});
cycle_heads.update_iteration_count(database_key_index, iteration_count);
revisions.update_iteration_count(iteration_count);
tracing::debug!(
"{database_key_index:?}: execute: iterate again, revisions: {revisions:#?}"
);
opt_last_provisional = Some(self.insert_memo(
zalsa,
id,


@ -1,9 +1,9 @@
use crate::cycle::{CycleHeads, CycleRecoveryStrategy, UnexpectedCycle};
use crate::cycle::{CycleHeads, CycleRecoveryStrategy, IterationCount, UnexpectedCycle};
use crate::function::memo::Memo;
use crate::function::sync::ClaimResult;
use crate::function::{Configuration, IngredientImpl, VerifyResult};
use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase};
use crate::zalsa_local::QueryRevisions;
use crate::zalsa_local::{QueryRevisions, ZalsaLocal};
use crate::Id;
impl<C> IngredientImpl<C>
@ -18,7 +18,7 @@ where
#[cfg(debug_assertions)]
let _span = tracing::debug_span!("fetch", query = ?database_key_index).entered();
let memo = self.refresh_memo(db, zalsa, id);
let memo = self.refresh_memo(db, zalsa, zalsa_local, id);
// SAFETY: We just refreshed the memo so it is guaranteed to contain a value now.
let memo_value = unsafe { memo.value.as_ref().unwrap_unchecked() };
@ -41,13 +41,16 @@ where
&'db self,
db: &'db C::DbView,
zalsa: &'db Zalsa,
zalsa_local: &'db ZalsaLocal,
id: Id,
) -> &'db Memo<C::Output<'db>> {
let memo_ingredient_index = self.memo_ingredient_index(zalsa, id);
loop {
if let Some(memo) = self
.fetch_hot(zalsa, id, memo_ingredient_index)
.or_else(|| self.fetch_cold_with_retry(zalsa, db, id, memo_ingredient_index))
.or_else(|| {
self.fetch_cold_with_retry(zalsa, zalsa_local, db, id, memo_ingredient_index)
})
{
return memo;
}
@ -84,11 +87,12 @@ where
fn fetch_cold_with_retry<'db>(
&'db self,
zalsa: &'db Zalsa,
zalsa_local: &'db ZalsaLocal,
db: &'db C::DbView,
id: Id,
memo_ingredient_index: MemoIngredientIndex,
) -> Option<&'db Memo<C::Output<'db>>> {
let memo = self.fetch_cold(zalsa, db, id, memo_ingredient_index)?;
let memo = self.fetch_cold(zalsa, zalsa_local, db, id, memo_ingredient_index)?;
// If we get back a provisional cycle memo, and it's provisional on any cycle heads
// that are claimed by a different thread, we can't propagate the provisional memo
@ -98,7 +102,7 @@ where
// That is only correct for fixpoint cycles, though: `FallbackImmediate` cycles
// never have provisional entries.
if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate
|| !memo.provisional_retry(zalsa, self.database_key_index(id))
|| !memo.provisional_retry(zalsa, zalsa_local, self.database_key_index(id))
{
Some(memo)
} else {
@ -109,15 +113,30 @@ where
fn fetch_cold<'db>(
&'db self,
zalsa: &'db Zalsa,
zalsa_local: &'db ZalsaLocal,
db: &'db C::DbView,
id: Id,
memo_ingredient_index: MemoIngredientIndex,
) -> Option<&'db Memo<C::Output<'db>>> {
let database_key_index = self.database_key_index(id);
// Try to claim this query: if someone else has claimed it already, go back and start again.
let _claim_guard = match self.sync_table.try_claim(zalsa, id) {
ClaimResult::Retry => return None,
ClaimResult::Cycle => {
let database_key_index = self.database_key_index(id);
let claim_guard = match self.sync_table.try_claim(zalsa, id) {
ClaimResult::Running(blocked_on) => {
blocked_on.block_on(zalsa);
let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index);
if let Some(memo) = memo {
// This isn't strictly necessary, but if this is a provisional memo for an inner cycle,
// await all outer cycle heads to give the thread driving it a chance to complete
// (we don't want multiple threads competing for the queries participating in the same cycle).
if memo.value.is_some() && memo.may_be_provisional() {
memo.block_on_heads(zalsa, zalsa_local);
}
}
return None;
}
ClaimResult::Cycle { .. } => {
// check if there's a provisional value for this query
// Note we don't `validate_may_be_provisional` the memo here as we want to reuse an
// existing provisional memo if it exists
@ -161,7 +180,8 @@ where
tracing::debug!(
"hit a `FallbackImmediate` cycle at {database_key_index:#?}"
);
let active_query = db.zalsa_local().push_query(database_key_index, 0);
let active_query =
zalsa_local.push_query(database_key_index, IterationCount::initial());
let fallback_value = C::cycle_initial(db, C::id_to_input(db, id));
let mut revisions = active_query.pop();
revisions.set_cycle_heads(CycleHeads::initial(database_key_index));
@ -181,28 +201,54 @@ where
// Now that we've claimed the item, check again to see if there's a "hot" value.
let opt_old_memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index);
if let Some(old_memo) = opt_old_memo {
if old_memo.value.is_some() {
let mut cycle_heads = CycleHeads::default();
if let VerifyResult::Unchanged(_) = self.deep_verify_memo(
db,
zalsa,
old_memo,
self.database_key_index(id),
&mut cycle_heads,
) {
if let VerifyResult::Unchanged(_) =
self.deep_verify_memo(db, zalsa, old_memo, database_key_index, &mut cycle_heads)
{
if cycle_heads.is_empty() {
// SAFETY: memo is present in memo_map and we have verified that it is
// still valid for the current revision.
return unsafe { Some(self.extend_memo_lifetime(old_memo)) };
}
}
// If this is a provisional memo from the same revision, await all its cycle heads because
// we need to ensure that only one thread is iterating on a cycle at a given time.
// For example, if we have a nested cycle like so:
// ```
// a -> b -> c -> b
// -> a
//
// d -> b
// ```
// thread 1 calls `a` and `a` completes the inner cycle `b -> c` but hasn't finished the outer cycle `a` yet.
// thread 2 now calls `b`. We don't want thread 2 to iterate `b` while thread 1 is iterating `a` at the same time
// because it can result in thread 2 overriding provisional memos that thread 1 has already accessed and still relies upon.
//
// By waiting, we ensure that thread 1 completes `a` (based on a provisional value for `b`) and `b`
// becomes the new outer cycle, which thread 2 drives to completion.
if old_memo.may_be_provisional()
&& old_memo.verified_at.load() == zalsa.current_revision()
{
// Try to claim all cycle heads of the provisional memo. If we can't because
// some head is running on another thread, drop our claim guard to give that thread
// a chance to take ownership of this query and complete it as part of its fixpoint iteration.
// We will then block on the cycle head and retry once all cycle heads have completed.
if !old_memo.try_claim_heads(zalsa, zalsa_local) {
drop(claim_guard);
old_memo.block_on_heads(zalsa, zalsa_local);
return None;
}
}
}
}
let memo = self.execute(
db,
db.zalsa_local().push_query(self.database_key_index(id), 0),
zalsa_local.push_query(database_key_index, IterationCount::initial()),
opt_old_memo,
);


@ -1,13 +1,14 @@
use crate::accumulator::accumulated_map::InputAccumulatedValues;
use crate::cycle::{CycleHeadKind, CycleHeads, CycleRecoveryStrategy, UnexpectedCycle};
use crate::cycle::{
CycleHeads, CycleRecoveryStrategy, IterationCount, ProvisionalStatus, UnexpectedCycle,
};
use crate::function::memo::Memo;
use crate::function::sync::ClaimResult;
use crate::function::{Configuration, IngredientImpl};
use crate::key::DatabaseKeyIndex;
use crate::plumbing::ZalsaLocal;
use crate::sync::atomic::Ordering;
use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase};
use crate::zalsa_local::{QueryEdgeKind, QueryOriginRef};
use crate::zalsa_local::{QueryEdgeKind, QueryOriginRef, ZalsaLocal};
use crate::{AsDynDatabase as _, Id, Revision};
/// Result of memo validation.
@ -102,8 +103,11 @@ where
let database_key_index = self.database_key_index(key_index);
let _claim_guard = match self.sync_table.try_claim(zalsa, key_index) {
ClaimResult::Retry => return None,
ClaimResult::Cycle => match C::CYCLE_STRATEGY {
ClaimResult::Running(blocked_on) => {
blocked_on.block_on(zalsa);
return None;
}
ClaimResult::Cycle { .. } => match C::CYCLE_STRATEGY {
CycleRecoveryStrategy::Panic => UnexpectedCycle::throw(),
CycleRecoveryStrategy::FallbackImmediate => {
return Some(VerifyResult::unchanged());
@ -152,7 +156,9 @@ where
// `in_cycle` tracks if the enclosing query is in a cycle. `deep_verify.cycle_heads` tracks
// if **this query** encountered a cycle (which means there's some provisional value somewhere floating around).
if old_memo.value.is_some() && cycle_heads.is_empty() {
let active_query = db.zalsa_local().push_query(database_key_index, 0);
let active_query = db
.zalsa_local()
.push_query(database_key_index, IterationCount::initial());
let memo = self.execute(db, active_query, Some(old_memo));
let changed_at = memo.revisions.changed_at;
@ -241,7 +247,7 @@ where
) -> bool {
!memo.may_be_provisional()
|| self.validate_provisional(zalsa, database_key_index, memo)
|| self.validate_same_iteration(zalsa_local, database_key_index, memo)
|| self.validate_same_iteration(zalsa, zalsa_local, database_key_index, memo)
}
/// Check if this memo's cycle heads have all been finalized. If so, mark it verified final and
@ -258,18 +264,33 @@ where
memo = memo.tracing_debug()
);
for cycle_head in memo.revisions.cycle_heads() {
let kind = zalsa
// Test if our cycle heads (with the same revision) are now finalized.
let Some(kind) = zalsa
.lookup_ingredient(cycle_head.database_key_index.ingredient_index())
.cycle_head_kind(zalsa, cycle_head.database_key_index.key_index());
.provisional_status(zalsa, cycle_head.database_key_index.key_index())
else {
return false;
};
match kind {
CycleHeadKind::Provisional => return false,
CycleHeadKind::NotProvisional => {
ProvisionalStatus::Provisional { .. } => return false,
ProvisionalStatus::Final { iteration } => {
// It's important to also account for the iteration count for the case where:
// thread 1: `b` -> `a` (but only in the first iteration)
// -> `c` -> `b`
// thread 2: `a` -> `b`
//
// If we don't account for the iteration count, then `a` (from iteration 0) will be finalized
// because its cycle head `b` is now finalized, but `b` never pulled `a` in the last iteration.
if iteration != cycle_head.iteration_count {
return false;
}
// FIXME: We can ignore this, I just don't have a use-case for this.
if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate {
panic!("cannot mix `cycle_fn` and `cycle_result` in cycles")
}
}
CycleHeadKind::FallbackImmediate => match C::CYCLE_STRATEGY {
ProvisionalStatus::FallbackImmediate => match C::CYCLE_STRATEGY {
CycleRecoveryStrategy::Panic => {
// Queries without fallback are not considered when inside a cycle.
return false;
@ -294,6 +315,7 @@ where
/// runaway re-execution of the same queries within a fixpoint iteration.
pub(super) fn validate_same_iteration(
&self,
zalsa: &Zalsa,
zalsa_local: &ZalsaLocal,
database_key_index: DatabaseKeyIndex,
memo: &Memo<C::Output<'_>>,
@ -314,7 +336,24 @@ where
.iter()
.rev()
.find(|query| query.database_key_index == cycle_head.database_key_index)
.is_some_and(|query| query.iteration_count() == cycle_head.iteration_count)
.map(|query| query.iteration_count())
.or_else(|| {
// If this cycle head is owned by another thread that is blocked by this ingredient,
// check if it has the same iteration count.
let ingredient = zalsa
.lookup_ingredient(cycle_head.database_key_index.ingredient_index());
let wait_result =
ingredient.wait_for(zalsa, cycle_head.database_key_index.key_index());
if !wait_result.is_cycle_with_other_thread() {
return None;
}
let provisional_status = ingredient
.provisional_status(zalsa, cycle_head.database_key_index.key_index())?;
provisional_status.iteration()
})
== Some(cycle_head.iteration_count)
})
})
}


@ -3,14 +3,17 @@ use std::fmt::{Debug, Formatter};
use std::mem::transmute;
use std::ptr::NonNull;
use crate::cycle::{empty_cycle_heads, CycleHeadKind, CycleHeads};
use crate::cycle::{empty_cycle_heads, CycleHead, CycleHeads, IterationCount, ProvisionalStatus};
use crate::function::{Configuration, IngredientImpl};
use crate::hash::FxHashSet;
use crate::ingredient::{Ingredient, WaitForResult};
use crate::key::DatabaseKeyIndex;
use crate::revision::AtomicRevision;
use crate::runtime::Running;
use crate::sync::atomic::Ordering;
use crate::table::memo::MemoTableWithTypesMut;
use crate::zalsa::{MemoIngredientIndex, Zalsa};
use crate::zalsa_local::{QueryOriginRef, QueryRevisions};
use crate::zalsa_local::{QueryOriginRef, QueryRevisions, ZalsaLocal};
use crate::{Event, EventKind, Id, Revision};
impl<C: Configuration> IngredientImpl<C> {
@ -105,6 +108,10 @@ const _: [(); std::mem::size_of::<Memo<std::num::NonZeroUsize>>()] =
impl<V> Memo<V> {
pub(super) fn new(value: Option<V>, revision_now: Revision, revisions: QueryRevisions) -> Self {
debug_assert!(
!revisions.verified_final.load(Ordering::Relaxed) || revisions.cycle_heads().is_empty(),
"Memo must be finalized if it has no cycle heads"
);
Memo {
value,
verified_at: AtomicRevision::from(revision_now),
@ -132,6 +139,7 @@ impl<V> Memo<V> {
pub(super) fn provisional_retry(
&self,
zalsa: &Zalsa,
zalsa_local: &ZalsaLocal,
database_key_index: DatabaseKeyIndex,
) -> bool {
if self.revisions.cycle_heads().is_empty() {
@ -142,62 +150,105 @@ impl<V> Memo<V> {
return false;
};
return provisional_retry_cold(zalsa, database_key_index, self.revisions.cycle_heads());
if self.block_on_heads(zalsa, zalsa_local) {
// If we get here, we are a provisional value of
// the cycle head (either initial value, or from a later iteration) and should be
// returned to caller to allow fixpoint iteration to proceed.
false
} else {
// all our cycle heads are complete; re-fetch
// and we should get a non-provisional memo.
tracing::debug!(
"Retrying provisional memo {database_key_index:?} after awaiting cycle heads."
);
true
}
}
/// Blocks on all cycle heads (recursively) that this memo depends on.
///
/// Returns `true` if awaiting all cycle heads results in a cycle. This means they're all waiting
/// for us to make progress.
#[inline(always)]
pub(super) fn block_on_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool {
// IMPORTANT: If you make changes to this function, make sure to run `cycle_nested_deep` with
// shuttle with at least 10k iterations.
// The most common case is that the entire cycle is running in the same thread.
// If that's the case, short circuit and return `true` immediately.
if self.all_cycles_on_stack(zalsa_local) {
return true;
}
// Otherwise, await all cycle heads, recursively.
return block_on_heads_cold(zalsa, self.cycle_heads());
#[inline(never)]
fn provisional_retry_cold(
zalsa: &Zalsa,
database_key_index: DatabaseKeyIndex,
cycle_heads: &CycleHeads,
) -> bool {
let mut retry = false;
let mut hit_cycle = false;
fn block_on_heads_cold(zalsa: &Zalsa, heads: &CycleHeads) -> bool {
let _entered = tracing::debug_span!("block_on_heads").entered();
let mut cycle_heads = TryClaimCycleHeadsIter::new(zalsa, heads);
let mut all_cycles = true;
for head in cycle_heads {
let head_index = head.database_key_index;
let ingredient = zalsa.lookup_ingredient(head_index.ingredient_index());
let cycle_head_kind = ingredient.cycle_head_kind(zalsa, head_index.key_index());
if matches!(
cycle_head_kind,
CycleHeadKind::NotProvisional | CycleHeadKind::FallbackImmediate
) {
// This cycle is already finalized, so we don't need to wait on it;
// keep looping through cycle heads.
retry = true;
tracing::trace!("Dependent cycle head {head_index:?} has been finalized.");
} else if ingredient.wait_for(zalsa, head_index.key_index()) {
tracing::trace!("Dependent cycle head {head_index:?} has been released (there's a new memo)");
// There's a new memo available for the cycle head; fetch our own
// updated memo and see if it's still provisional or if the cycle
// has resolved.
retry = true;
} else {
// We hit a cycle blocking on the cycle head; this means it's in
// our own active query stack and we are responsible to resolve the
// cycle, so go ahead and return the provisional memo.
tracing::debug!(
"Waiting for {head_index:?} results in a cycle, return {database_key_index:?} once all other cycle heads completed to allow the outer cycle to make progress."
);
hit_cycle = true;
while let Some(claim_result) = cycle_heads.next() {
match claim_result {
TryClaimHeadsResult::Cycle => {}
TryClaimHeadsResult::Finalized => {
all_cycles = false;
}
TryClaimHeadsResult::Available => {
all_cycles = false;
}
TryClaimHeadsResult::Running(running) => {
all_cycles = false;
running.block_on(&mut cycle_heads);
}
}
}
// If `retry` is `true`, all our cycle heads (barring ourself) are complete; re-fetch
// and we should get a non-provisional memo. If we get here and `retry` is still
// `false`, we have no cycle heads other than ourself, so we are a provisional value of
// the cycle head (either initial value, or from a later iteration) and should be
// returned to caller to allow fixpoint iteration to proceed. (All cases in the loop
// above other than "cycle head is self" are either terminal or set `retry`.)
if hit_cycle {
false
} else if retry {
tracing::debug!("Retrying {database_key_index:?}");
true
} else {
false
all_cycles
}
}
/// Tries to claim all cycle heads to see if they're finalized or available.
///
/// Unlike `block_on_heads`, this code does not block on any cycle head. Instead it returns `false` if
/// claiming all cycle heads failed because one of them is running on another thread.
pub(super) fn try_claim_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool {
let _entered = tracing::debug_span!("try_claim_heads").entered();
if self.all_cycles_on_stack(zalsa_local) {
return true;
}
let cycle_heads = TryClaimCycleHeadsIter::new(zalsa, self.revisions.cycle_heads());
for claim_result in cycle_heads {
match claim_result {
TryClaimHeadsResult::Cycle
| TryClaimHeadsResult::Finalized
| TryClaimHeadsResult::Available => {}
TryClaimHeadsResult::Running(_) => {
return false;
}
}
}
true
}
fn all_cycles_on_stack(&self, zalsa_local: &ZalsaLocal) -> bool {
let cycle_heads = self.revisions.cycle_heads();
if cycle_heads.is_empty() {
return true;
}
zalsa_local.with_query_stack(|stack| {
cycle_heads.iter().all(|cycle_head| {
stack
.iter()
.rev()
.any(|query| query.database_key_index == cycle_head.database_key_index)
})
})
}
/// Cycle heads that should be propagated to dependent queries.
@ -252,7 +303,7 @@ impl<V> Memo<V> {
},
)
.field("verified_at", &self.memo.verified_at)
// .field("revisions", &self.memo.revisions)
.field("revisions", &self.memo.revisions)
.finish()
}
}
@ -266,3 +317,119 @@ impl<V: Send + Sync + Any> crate::table::memo::Memo for Memo<V> {
self.revisions.origin.as_ref()
}
}
pub(super) enum TryClaimHeadsResult<'me> {
/// Claiming every cycle head results in a cycle.
Cycle,
/// The cycle head has been finalized.
Finalized,
/// The cycle head is not finalized, but it can be claimed.
Available,
/// The cycle head is currently executed on another thread.
Running(RunningCycleHead<'me>),
}
pub(super) struct RunningCycleHead<'me> {
inner: Running<'me>,
ingredient: &'me dyn Ingredient,
}
impl<'a> RunningCycleHead<'a> {
fn block_on(self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) {
let key_index = self.inner.database_key().key_index();
self.inner.block_on(cycle_heads.zalsa);
cycle_heads.queue_ingredient_heads(self.ingredient, key_index);
}
}
/// Iterator to try claiming the transitive cycle heads of a memo.
struct TryClaimCycleHeadsIter<'a> {
zalsa: &'a Zalsa,
queue: Vec<CycleHead>,
queued: FxHashSet<CycleHead>,
}
impl<'a> TryClaimCycleHeadsIter<'a> {
fn new(zalsa: &'a Zalsa, heads: &CycleHeads) -> Self {
let queue: Vec<_> = heads.iter().copied().collect();
let queued: FxHashSet<_> = queue.iter().copied().collect();
Self {
zalsa,
queue,
queued,
}
}
fn queue_ingredient_heads(&mut self, ingredient: &dyn Ingredient, key: Id) {
// Recursively wait for all cycle heads that this head depends on. It's important
// that we fetch those from the updated memo because the cycle heads can change
// between iterations, and new cycle heads can be added if a query depends on
// some cycle heads only when a specific condition is met
// (`a` calls `b` and `c` in iteration 0 but `c` and `d` in iteration 1 or later).
// IMPORTANT: It's critical that we get the cycle head from the latest memo
// here, in case the memo has become part of another cycle (we need to block on that too!).
self.queue.extend(
ingredient
.cycle_heads(self.zalsa, key)
.iter()
.copied()
.filter(|head| self.queued.insert(*head)),
)
}
}
impl<'me> Iterator for TryClaimCycleHeadsIter<'me> {
type Item = TryClaimHeadsResult<'me>;
fn next(&mut self) -> Option<Self::Item> {
let head = self.queue.pop()?;
let head_database_key = head.database_key_index;
let head_key_index = head_database_key.key_index();
let ingredient = self
.zalsa
.lookup_ingredient(head_database_key.ingredient_index());
let cycle_head_kind = ingredient
.provisional_status(self.zalsa, head_key_index)
.unwrap_or(ProvisionalStatus::Provisional {
iteration: IterationCount::initial(),
});
match cycle_head_kind {
ProvisionalStatus::Final { .. } | ProvisionalStatus::FallbackImmediate => {
// This cycle is already finalized, so we don't need to wait on it;
// keep looping through cycle heads.
tracing::trace!("Dependent cycle head {head:?} has been finalized.");
Some(TryClaimHeadsResult::Finalized)
}
ProvisionalStatus::Provisional { .. } => {
match ingredient.wait_for(self.zalsa, head_key_index) {
WaitForResult::Cycle { .. } => {
// We hit a cycle blocking on the cycle head; this means this query actively
// participates in the cycle and some other query is blocked on this thread.
tracing::debug!("Waiting for {head:?} results in a cycle");
Some(TryClaimHeadsResult::Cycle)
}
WaitForResult::Running(running) => {
tracing::debug!("Ingredient {head:?} is running: {running:?}");
Some(TryClaimHeadsResult::Running(RunningCycleHead {
inner: running,
ingredient,
}))
}
WaitForResult::Available => {
self.queue_ingredient_heads(ingredient, head_key_index);
Some(TryClaimHeadsResult::Available)
}
}
}
}
}
}


@ -1,12 +1,14 @@
use rustc_hash::FxHashMap;
use crate::key::DatabaseKeyIndex;
use crate::runtime::{BlockResult, WaitResult};
use crate::runtime::{BlockResult, Running, WaitResult};
use crate::sync::thread::{self, ThreadId};
use crate::sync::Mutex;
use crate::zalsa::Zalsa;
use crate::{Id, IngredientIndex};
pub(crate) type SyncGuard<'me> = crate::sync::MutexGuard<'me, FxHashMap<Id, SyncState>>;
/// Tracks the keys that are currently being processed; used to coordinate between
/// worker threads.
pub(crate) struct SyncTable {
@ -15,12 +17,15 @@ pub(crate) struct SyncTable {
}
pub(crate) enum ClaimResult<'a> {
Retry,
Cycle,
/// Can't claim the query because it is running on another thread.
Running(Running<'a>),
/// Claiming the query results in a cycle.
Cycle { same_thread: bool },
/// Successfully claimed the query.
Claimed(ClaimGuard<'a>),
}
struct SyncState {
pub(crate) struct SyncState {
id: ThreadId,
/// Set to true if any other queries are blocked,
@ -51,14 +56,13 @@ impl SyncTable {
// boolean is to decide *whether* to acquire the lock,
// not to gate future atomic reads.
*anyone_waiting = true;
match zalsa.runtime().block_on(
zalsa,
match zalsa.runtime().block(
DatabaseKeyIndex::new(self.ingredient, key_index),
id,
write,
) {
BlockResult::Completed => ClaimResult::Retry,
BlockResult::Cycle => ClaimResult::Cycle,
BlockResult::Running(blocked_on) => ClaimResult::Running(blocked_on),
BlockResult::Cycle { same_thread } => ClaimResult::Cycle { same_thread },
}
}
std::collections::hash_map::Entry::Vacant(vacant_entry) => {
@ -70,7 +74,6 @@ impl SyncTable {
key_index,
zalsa,
sync_table: self,
_padding: false,
})
}
}
@ -84,9 +87,6 @@ pub(crate) struct ClaimGuard<'me> {
key_index: Id,
zalsa: &'me Zalsa,
sync_table: &'me SyncTable,
// Reduce the size of ClaimResult by making more niches available in ClaimGuard; this fits into
// the padding of ClaimGuard so doesn't increase its size.
_padding: bool,
}
impl ClaimGuard<'_> {


@ -2,9 +2,12 @@ use std::any::{Any, TypeId};
use std::fmt;
use crate::accumulator::accumulated_map::{AccumulatedMap, InputAccumulatedValues};
use crate::cycle::{CycleHeadKind, CycleHeads, CycleRecoveryStrategy};
use crate::cycle::{
empty_cycle_heads, CycleHeads, CycleRecoveryStrategy, IterationCount, ProvisionalStatus,
};
use crate::function::VerifyResult;
use crate::plumbing::IngredientIndices;
use crate::runtime::Running;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::table::Table;
@ -66,23 +69,36 @@ pub trait Ingredient: Any + std::fmt::Debug + Send + Sync {
cycle_heads: &mut CycleHeads,
) -> VerifyResult;
/// Is the value for `input` in this ingredient a cycle head that is still provisional?
/// Returns information about the current provisional status of `input`.
///
/// In the case of nested cycles, we are not asking here whether the value is provisional due
/// to the outer cycle being unresolved, only whether its own cycle remains provisional.
fn cycle_head_kind(&self, zalsa: &Zalsa, input: Id) -> CycleHeadKind {
/// Whether it is a provisional value or has been finalized, and in which iteration it was created.
///
/// Returns `None` if `input` doesn't exist.
fn provisional_status(&self, zalsa: &Zalsa, input: Id) -> Option<ProvisionalStatus> {
_ = (zalsa, input);
CycleHeadKind::NotProvisional
Some(ProvisionalStatus::Final {
iteration: IterationCount::initial(),
})
}
/// Returns the cycle heads for this ingredient.
fn cycle_heads<'db>(&self, zalsa: &'db Zalsa, input: Id) -> &'db CycleHeads {
_ = (zalsa, input);
empty_cycle_heads()
}
/// Invoked when the current thread needs to wait for a result for the given `key_index`.
/// This call doesn't block the current thread. Instead, it's up to the caller to block
/// in case `key_index` is [running](`WaitForResult::Running`) on another thread.
///
/// A return value of `true` indicates that a result is now available. A return value of
/// `false` means that a cycle was encountered; the waited-on query is either already claimed
/// A return value of [`WaitForResult::Available`] indicates that a result is now available.
/// A return value of [`WaitForResult::Running`] indicates that `key_index` is currently running
/// on another thread; it's up to the caller to block until the result becomes available if desired.
/// A return value of [`WaitForResult::Cycle`] means that a cycle was encountered; the waited-on query is either already claimed
/// by the current thread, or by a thread waiting on the current thread.
fn wait_for(&self, zalsa: &Zalsa, key_index: Id) -> bool {
fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> {
_ = (zalsa, key_index);
true
WaitForResult::Available
}
/// Invoked when the value `output_key` should be marked as valid in the current revision.
@ -203,3 +219,16 @@ impl dyn Ingredient {
pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "{debug_name}({id:?})")
}
pub enum WaitForResult<'me> {
Running(Running<'me>),
Available,
Cycle { same_thread: bool },
}
impl WaitForResult<'_> {
/// Returns `true` if waiting for this input results in a cycle with another thread.
pub const fn is_cycle_with_other_thread(&self) -> bool {
matches!(self, WaitForResult::Cycle { same_thread: false })
}
}
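
For context, `wait_for` no longer blocks by itself; callers inspect the returned `WaitForResult` and decide whether to block, as done in `fetch_cold` and `TryClaimCycleHeadsIter` above. A minimal sketch of that calling pattern (the `ingredient`, `zalsa`, and `key_index` bindings are assumed to be in scope):

```rust
// Sketch: reacting to the non-blocking `wait_for` introduced in this commit.
match ingredient.wait_for(zalsa, key_index) {
    // The query is (or at least was) claimable; proceed and try to claim it.
    WaitForResult::Available => {}
    // Another thread owns the query; explicitly block until it publishes a result.
    WaitForResult::Running(running) => running.block_on(zalsa),
    // Blocking would deadlock: the query is on our own stack (`same_thread`)
    // or held by a thread that is itself (transitively) waiting on us.
    WaitForResult::Cycle { same_thread: _ } => {}
}
```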


@ -1,10 +1,12 @@
use self::dependency_graph::DependencyGraph;
use crate::durability::Durability;
use crate::function::SyncGuard;
use crate::key::DatabaseKeyIndex;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::thread::{self, ThreadId};
use crate::sync::Mutex;
use crate::table::Table;
use crate::zalsa::Zalsa;
use crate::{Cancelled, Event, EventKind, Revision};
mod dependency_graph;
@ -34,16 +36,83 @@ pub struct Runtime {
table: Table,
}
#[derive(Clone, Debug)]
pub(crate) enum WaitResult {
#[derive(Copy, Clone, Debug)]
pub(super) enum WaitResult {
Completed,
Panicked,
}
#[derive(Clone, Debug)]
pub(crate) enum BlockResult {
Completed,
Cycle,
#[derive(Debug)]
pub(crate) enum BlockResult<'me> {
/// The query is running on another thread.
Running(Running<'me>),
/// Blocking resulted in a cycle.
///
/// The lock is held by the current thread, or there's another thread that is waiting on the current thread,
/// and blocking this thread on the other thread would result in a deadlock/cycle.
Cycle { same_thread: bool },
}
pub struct Running<'me>(Box<BlockedOnInner<'me>>);
struct BlockedOnInner<'me> {
dg: crate::sync::MutexGuard<'me, DependencyGraph>,
query_mutex_guard: SyncGuard<'me>,
database_key: DatabaseKeyIndex,
other_id: ThreadId,
thread_id: ThreadId,
}
impl Running<'_> {
pub(crate) fn database_key(&self) -> DatabaseKeyIndex {
self.0.database_key
}
/// Blocks on the other thread to complete the computation.
pub(crate) fn block_on(self, zalsa: &Zalsa) {
let BlockedOnInner {
dg,
query_mutex_guard,
database_key,
other_id,
thread_id,
} = *self.0;
zalsa.event(&|| {
Event::new(EventKind::WillBlockOn {
other_thread_id: other_id,
database_key,
})
});
tracing::debug!(
"block_on: thread {thread_id:?} is blocking on {database_key:?} in thread {other_id:?}",
);
let result =
DependencyGraph::block_on(dg, thread_id, database_key, other_id, query_mutex_guard);
match result {
WaitResult::Panicked => {
// If the other thread panicked, then we consider this thread
// cancelled. The assumption is that the panic will be detected
// by the other thread and responded to appropriately.
Cancelled::PropagatedPanic.throw()
}
WaitResult::Completed => {}
}
}
}
impl std::fmt::Debug for Running<'_> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("Running")
.field("database_key", &self.0.database_key)
.field("other_id", &self.0.other_id)
.field("thread_id", &self.0.thread_id)
.finish()
}
}
#[derive(Copy, Clone, Debug)]
@ -165,43 +234,32 @@ impl Runtime {
///
/// If the thread `other_id` panics, then our thread is considered
/// cancelled, so this function will panic with a `Cancelled` value.
pub(crate) fn block_on<QueryMutexGuard>(
&self,
zalsa: &crate::zalsa::Zalsa,
pub(crate) fn block<'a>(
&'a self,
database_key: DatabaseKeyIndex,
other_id: ThreadId,
query_mutex_guard: QueryMutexGuard,
) -> BlockResult {
query_mutex_guard: SyncGuard<'a>,
) -> BlockResult<'a> {
let thread_id = thread::current().id();
// Cycle in the same thread.
if thread_id == other_id {
return BlockResult::Cycle;
return BlockResult::Cycle { same_thread: true };
}
let dg = self.dependency_graph.lock();
if dg.depends_on(other_id, thread_id) {
return BlockResult::Cycle;
tracing::debug!("block_on: cycle detected for {database_key:?} in thread {thread_id:?} on {other_id:?}");
return BlockResult::Cycle { same_thread: false };
}
zalsa.event(&|| {
Event::new(EventKind::WillBlockOn {
other_thread_id: other_id,
database_key,
})
});
let result =
DependencyGraph::block_on(dg, thread_id, database_key, other_id, query_mutex_guard);
match result {
WaitResult::Completed => BlockResult::Completed,
// If the other thread panicked, then we consider this thread
// cancelled. The assumption is that the panic will be detected
// by the other thread and responded to appropriately.
WaitResult::Panicked => Cancelled::PropagatedPanic.throw(),
}
BlockResult::Running(Running(Box::new(BlockedOnInner {
dg,
query_mutex_guard,
database_key,
other_id,
thread_id,
})))
}
/// Invoked when this runtime completed computing `database_key` with


@ -123,7 +123,7 @@ impl DependencyGraph {
.unwrap_or_default();
for from_id in dependents {
self.unblock_runtime(from_id, wait_result.clone());
self.unblock_runtime(from_id, wait_result);
}
}


@ -7,7 +7,7 @@ use tracing::debug;
use crate::accumulator::accumulated_map::{AccumulatedMap, AtomicInputAccumulatedValues};
use crate::active_query::QueryStack;
use crate::cycle::{empty_cycle_heads, CycleHeads};
use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount};
use crate::durability::Durability;
use crate::key::DatabaseKeyIndex;
use crate::runtime::Stamp;
@ -99,7 +99,7 @@ impl ZalsaLocal {
pub(crate) fn push_query(
&self,
database_key_index: DatabaseKeyIndex,
iteration_count: u32,
iteration_count: IterationCount,
) -> ActiveQueryGuard<'_> {
let mut query_stack = self.query_stack.borrow_mut();
query_stack.push_new_query(database_key_index, iteration_count);
@ -359,17 +359,22 @@ impl QueryRevisionsExtra {
accumulated: AccumulatedMap,
tracked_struct_ids: IdentityMap,
cycle_heads: CycleHeads,
iteration: IterationCount,
) -> Self {
let inner =
if tracked_struct_ids.is_empty() && cycle_heads.is_empty() && accumulated.is_empty() {
None
} else {
Some(Box::new(QueryRevisionsExtraInner {
accumulated,
cycle_heads,
tracked_struct_ids,
}))
};
let inner = if tracked_struct_ids.is_empty()
&& cycle_heads.is_empty()
&& accumulated.is_empty()
&& iteration.is_initial()
{
None
} else {
Some(Box::new(QueryRevisionsExtraInner {
accumulated,
cycle_heads,
tracked_struct_ids,
iteration,
}))
};
Self(inner)
}
@ -407,6 +412,8 @@ struct QueryRevisionsExtraInner {
/// after each iteration, whether the cycle has converged or must
/// iterate again.
cycle_heads: CycleHeads,
iteration: IterationCount,
}
#[cfg(not(feature = "shuttle"))]
@ -416,7 +423,7 @@ const _: [(); std::mem::size_of::<QueryRevisions>()] = [(); std::mem::size_of::<
#[cfg(not(feature = "shuttle"))]
#[cfg(target_pointer_width = "64")]
const _: [(); std::mem::size_of::<QueryRevisionsExtraInner>()] =
[(); std::mem::size_of::<[usize; 9]>()];
[(); std::mem::size_of::<[usize; 10]>()];
impl QueryRevisions {
pub(crate) fn fixpoint_initial(query: DatabaseKeyIndex) -> Self {
@ -430,6 +437,7 @@ impl QueryRevisions {
AccumulatedMap::default(),
IdentityMap::default(),
CycleHeads::initial(query),
IterationCount::initial(),
),
}
}
@ -469,11 +477,26 @@ impl QueryRevisions {
AccumulatedMap::default(),
IdentityMap::default(),
cycle_heads,
IterationCount::default(),
);
}
};
}
pub(crate) const fn iteration(&self) -> IterationCount {
match &self.extra.0 {
Some(extra) => extra.iteration,
None => IterationCount::initial(),
}
}
/// Updates the iteration count if this query has any cycle heads. Otherwise it's a no-op.
pub(crate) fn update_iteration_count(&mut self, iteration_count: IterationCount) {
if let Some(extra) = &mut self.extra.0 {
extra.iteration = iteration_count
}
}
/// Returns a reference to the `IdentityMap` for this query, or `None` if the map is empty.
pub fn tracked_struct_ids(&self) -> Option<&IdentityMap> {
self.extra


@ -106,7 +106,7 @@ fn backtrace_works() {
at tests/backtrace.rs:30
1: query_cycle(Id(2))
at tests/backtrace.rs:43
cycle heads: query_cycle(Id(2)) -> 0
cycle heads: query_cycle(Id(2)) -> IterationCount(0)
2: query_f(Id(2))
at tests/backtrace.rs:38
"#]]
@ -117,9 +117,9 @@ fn backtrace_works() {
query stacktrace:
0: query_e(Id(3)) -> (R1, Durability::LOW)
at tests/backtrace.rs:30
1: query_cycle(Id(3)) -> (R1, Durability::HIGH, iteration = 0)
1: query_cycle(Id(3)) -> (R1, Durability::HIGH, iteration = IterationCount(0))
at tests/backtrace.rs:43
cycle heads: query_cycle(Id(3)) -> 0
cycle heads: query_cycle(Id(3)) -> IterationCount(0)
2: query_f(Id(3)) -> (R1, Durability::HIGH)
at tests/backtrace.rs:38
"#]]


@ -1017,7 +1017,7 @@ fn repeat_provisional_query() {
"salsa_event(WillExecute { database_key: min_iterate(Id(0)) })",
"salsa_event(WillExecute { database_key: min_panic(Id(1)) })",
"salsa_event(WillExecute { database_key: min_panic(Id(2)) })",
"salsa_event(WillIterateCycle { database_key: min_iterate(Id(0)), iteration_count: 1, fell_back: false })",
"salsa_event(WillIterateCycle { database_key: min_iterate(Id(0)), iteration_count: IterationCount(1), fell_back: false })",
"salsa_event(WillExecute { database_key: min_panic(Id(1)) })",
"salsa_event(WillExecute { database_key: min_panic(Id(2)) })",
]"#]]);


@ -194,13 +194,13 @@ fn revalidate_with_change_after_output_read() {
"salsa_event(WillDiscardStaleOutput { execute_key: query_a(Id(0)), output_key: Output(Id(402)) })",
"salsa_event(DidDiscard { key: Output(Id(402)) })",
"salsa_event(DidDiscard { key: read_value(Id(402)) })",
"salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: 1, fell_back: false })",
"salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: IterationCount(1), fell_back: false })",
"salsa_event(WillExecute { database_key: query_a(Id(0)) })",
"salsa_event(WillExecute { database_key: read_value(Id(403g1)) })",
"salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: 2, fell_back: false })",
"salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: IterationCount(2), fell_back: false })",
"salsa_event(WillExecute { database_key: query_a(Id(0)) })",
"salsa_event(WillExecute { database_key: read_value(Id(401g1)) })",
"salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: 3, fell_back: false })",
"salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: IterationCount(3), fell_back: false })",
"salsa_event(WillExecute { database_key: query_a(Id(0)) })",
"salsa_event(WillExecute { database_key: read_value(Id(402g1)) })",
]"#]]);


@ -163,7 +163,7 @@ fn main() {
"WillExecute { database_key: cost_to_start(Id(401)) }",
"WillCheckCancellation",
"WillCheckCancellation",
"WillIterateCycle { database_key: cost_to_start(Id(403)), iteration_count: 1, fell_back: false }",
"WillIterateCycle { database_key: cost_to_start(Id(403)), iteration_count: IterationCount(1), fell_back: false }",
"WillCheckCancellation",
"WillCheckCancellation",
"WillCheckCancellation",


@ -115,7 +115,7 @@ fn main() {
"WillExecute { database_key: infer_type_param(Id(400)) }",
"WillCheckCancellation",
"DidInternValue { key: Class(Id(c00)), revision: R2 }",
"WillIterateCycle { database_key: infer_class(Id(0)), iteration_count: 1, fell_back: false }",
"WillIterateCycle { database_key: infer_class(Id(0)), iteration_count: IterationCount(1), fell_back: false }",
"WillCheckCancellation",
"WillExecute { database_key: infer_type_param(Id(400)) }",
"WillCheckCancellation",


@ -62,11 +62,18 @@ fn initial(_db: &dyn KnobsDatabase) -> CycleValue {
#[test_log::test]
fn the_test() {
crate::sync::check(|| {
tracing::debug!("New run");
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();
let t1 = thread::spawn(move || query_a(&db_t1));
let t2 = thread::spawn(move || query_b(&db_t2));
let t1 = thread::spawn(move || {
let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered();
query_a(&db_t1)
});
let t2 = thread::spawn(move || {
let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered();
query_b(&db_t2)
});
let (r_t1, r_t2) = (t1.join().unwrap(), t2.join().unwrap());


@ -11,8 +11,7 @@
//! | |
//! +--------------------+
//! ```
use crate::sync::thread;
use crate::{Knobs, KnobsDatabase};
use crate::KnobsDatabase;
const FALLBACK_A: u32 = 0b01;
const FALLBACK_B: u32 = 0b10;
@ -51,8 +50,10 @@ fn cycle_result_b(_db: &dyn KnobsDatabase) -> u32 {
}
#[test_log::test]
#[cfg(not(feature = "shuttle"))] // This test is currently failing.
fn the_test() {
use crate::sync::thread;
use crate::Knobs;
crate::sync::check(|| {
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();


@ -60,7 +60,7 @@ fn query_c(db: &dyn KnobsDatabase) -> CycleValue {
query_b(db)
}
#[test]
#[test_log::test]
fn the_test() {
crate::sync::check(|| {
let db_t1 = Knobs::default();
@ -68,8 +68,15 @@ fn the_test() {
let db_t2 = db_t1.clone();
db_t2.signal_on_will_block(2);
let t1 = thread::spawn(move || query_a(&db_t1));
let t2 = thread::spawn(move || query_c(&db_t2));
let t1 = thread::spawn(move || {
let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered();
query_a(&db_t1)
});
let t2 = thread::spawn(move || {
let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered();
query_c(&db_t2)
});
let (r_t1, r_t2) = (t1.join().unwrap(), t2.join().unwrap());


@ -0,0 +1,100 @@
//! Test a deeply nested-cycle scenario across multiple threads.
//!
//! The trick is that different threads call into the same cycle from different entry queries.
//!
//! * Thread 1: `a` -> `b` -> `c` (which calls back into `d`, `e`, `b`, `a`)
//! * Thread 2: `b`
//! * Thread 3: `d` -> `c`
//! * Thread 4: `e` -> `c`
use crate::sync::thread;
use crate::{Knobs, KnobsDatabase};
use salsa::CycleRecoveryAction;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)]
struct CycleValue(u32);
const MIN: CycleValue = CycleValue(0);
const MAX: CycleValue = CycleValue(3);
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_a(db: &dyn KnobsDatabase) -> CycleValue {
query_b(db)
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_b(db: &dyn KnobsDatabase) -> CycleValue {
let c_value = query_c(db);
CycleValue(c_value.0 + 1).min(MAX)
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_c(db: &dyn KnobsDatabase) -> CycleValue {
let d_value = query_d(db);
let e_value = query_e(db);
let b_value = query_b(db);
let a_value = query_a(db);
CycleValue(d_value.0.max(e_value.0).max(b_value.0).max(a_value.0))
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_d(db: &dyn KnobsDatabase) -> CycleValue {
query_c(db)
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_e(db: &dyn KnobsDatabase) -> CycleValue {
query_c(db)
}
fn cycle_fn(
_db: &dyn KnobsDatabase,
_value: &CycleValue,
_count: u32,
) -> CycleRecoveryAction<CycleValue> {
CycleRecoveryAction::Iterate
}
fn initial(_db: &dyn KnobsDatabase) -> CycleValue {
MIN
}
#[test_log::test]
fn the_test() {
crate::sync::check(|| {
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();
let db_t3 = db_t1.clone();
let db_t4 = db_t1.clone();
let t1 = thread::spawn(move || {
let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered();
let result = query_a(&db_t1);
db_t1.signal(1);
result
});
let t2 = thread::spawn(move || {
let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered();
db_t4.wait_for(1);
query_b(&db_t4)
});
let t3 = thread::spawn(move || {
let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered();
db_t2.wait_for(1);
query_d(&db_t2)
});
let t4 = thread::spawn(move || {
let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered();
db_t3.wait_for(1);
query_e(&db_t3)
});
let r_t1 = t1.join().unwrap();
let r_t2 = t2.join().unwrap();
let r_t3 = t3.join().unwrap();
let r_t4 = t4.join().unwrap();
assert_eq!((r_t1, r_t2, r_t3, r_t4), (MAX, MAX, MAX, MAX));
});
}


@ -0,0 +1,110 @@
//! Test a deeply nested-cycle scenario where cycles have changing query dependencies.
//!
//! The trick is that different threads call into the same cycle from different entry queries and
//! the cycle heads change over different iterations.
//!
//! * Thread 1: `a` -> `b` -> `c`
//! * Thread 2: `b`
//! * Thread 3: `d` -> `c`
//! * Thread 4: `e` -> `c`
//!
//! `c` calls:
//! * `d` and `a` in the first few iterations
//! * `d`, `b` and `e` in the last iterations
use crate::sync::thread;
use crate::{Knobs, KnobsDatabase};
use salsa::CycleRecoveryAction;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)]
struct CycleValue(u32);
const MIN: CycleValue = CycleValue(0);
const MAX: CycleValue = CycleValue(3);
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_a(db: &dyn KnobsDatabase) -> CycleValue {
query_b(db)
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_b(db: &dyn KnobsDatabase) -> CycleValue {
let c_value = query_c(db);
CycleValue(c_value.0 + 1).min(MAX)
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_c(db: &dyn KnobsDatabase) -> CycleValue {
let d_value = query_d(db);
if d_value > CycleValue(0) {
let e_value = query_e(db);
let b_value = query_b(db);
CycleValue(d_value.0.max(e_value.0).max(b_value.0))
} else {
let a_value = query_a(db);
CycleValue(d_value.0.max(a_value.0))
}
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_d(db: &dyn KnobsDatabase) -> CycleValue {
query_c(db)
}
#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)]
fn query_e(db: &dyn KnobsDatabase) -> CycleValue {
query_c(db)
}
fn cycle_fn(
_db: &dyn KnobsDatabase,
_value: &CycleValue,
_count: u32,
) -> CycleRecoveryAction<CycleValue> {
CycleRecoveryAction::Iterate
}
fn initial(_db: &dyn KnobsDatabase) -> CycleValue {
MIN
}
#[test_log::test]
fn the_test() {
crate::sync::check(|| {
tracing::debug!("New run");
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();
let db_t3 = db_t1.clone();
let db_t4 = db_t1.clone();
let t1 = thread::spawn(move || {
let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered();
let result = query_a(&db_t1);
db_t1.signal(1);
result
});
let t2 = thread::spawn(move || {
let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered();
db_t4.wait_for(1);
query_b(&db_t4)
});
let t3 = thread::spawn(move || {
let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered();
db_t2.wait_for(1);
query_d(&db_t2)
});
let t4 = thread::spawn(move || {
let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered();
db_t3.wait_for(1);
query_e(&db_t3)
});
let r_t1 = t1.join().unwrap();
let r_t2 = t2.join().unwrap();
let r_t3 = t3.join().unwrap();
let r_t4 = t4.join().unwrap();
assert_eq!((r_t1, r_t2, r_t3, r_t4), (MAX, MAX, MAX, MAX));
});
}


@ -4,6 +4,8 @@ mod signal;
mod cycle_a_t1_b_t2;
mod cycle_a_t1_b_t2_fallback;
mod cycle_ab_peeping_c;
mod cycle_nested_deep;
mod cycle_nested_deep_conditional;
mod cycle_nested_three_threads;
mod cycle_panic;
mod cycle_provisional_depending_on_itself;