Replace loom with shuttle (#876)

* replace loom with shuttle

* inline `empty_cycle_heads`

* ignore failing shuttle test
This commit is contained in:
Ibraheem Ahmed 2025-05-23 11:28:51 -04:00 committed by GitHub
parent f7b08562ed
commit 0414d89327
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
58 changed files with 526 additions and 604 deletions

View file

@ -52,15 +52,13 @@ jobs:
- name: Format
run: cargo fmt -- --check
- name: Clippy
run: cargo clippy --workspace --all-features --all-targets -- -D warnings
run: cargo clippy --workspace --all-targets -- -D warnings
- name: Test
run: cargo nextest run --workspace --all-features --all-targets --no-fail-fast
run: cargo nextest run --workspace --all-targets --no-fail-fast
- name: Test docs
run: cargo test --workspace --all-features --doc
run: cargo test --workspace --doc
- name: Check (without default features)
run: cargo check --workspace --no-default-features
- name: Check (loom)
run: RUSTFLAGS="--cfg loom" cargo check --workspace --features loom
miri:
name: Miri
@ -88,12 +86,39 @@ jobs:
- name: Setup Miri
run: cargo miri setup
- name: Test with Miri
run: cargo miri nextest run --all-features --no-fail-fast --tests
run: cargo miri nextest run --no-fail-fast --tests
env:
MIRIFLAGS: -Zmiri-disable-isolation -Zmiri-retag-fields
- name: Run examples with Miri
run: cargo miri run --example calc
shuttle:
name: Shuttle
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@master
id: rust-toolchain
with:
toolchain: stable
- uses: taiki-e/install-action@nextest
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.toml') }}
restore-keys: |
${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-
${{ runner.os }}-cargo-
- name: Test with Shuttle
run: cargo nextest run --features shuttle --test parallel
benchmarks:
# https://github.com/CodSpeedHQ/action/issues/126
if: github.event_name != 'merge_group'

View file

@ -15,6 +15,8 @@ salsa-macros = { version = "0.22.0", path = "components/salsa-macros", optional
boxcar = { version = "0.2.12" }
crossbeam-queue = "0.3.11"
dashmap = { version = "6", features = ["raw-api"] }
# the version of hashbrown used by dashmap
hashbrown_14 = { version = "0.14", package = "hashbrown" }
hashbrown = "0.15"
hashlink = "0.10"
indexmap = "2"
@ -30,13 +32,14 @@ rayon = { version = "1.10.0", optional = true }
# Stuff we want Update impls for by default
compact_str = { version = "0.9", optional = true }
thin-vec = "0.2.13"
loom = { version = "0.7.2", optional = true }
shuttle = { version = "0.8.0", optional = true }
[features]
default = ["salsa_unstable", "rayon", "macros"]
shuttle = ["dep:shuttle"]
# FIXME: remove `salsa_unstable` before 1.0.
salsa_unstable = []
loom = ["dep:loom", "boxcar/loom"]
macros = ["dep:salsa-macros"]
# This interlocks the `salsa-macros` and `salsa` versions together
@ -67,9 +70,6 @@ half = "=2.4.1"
[target.'cfg(all(not(target_os = "windows"), not(target_os = "openbsd"), any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64")))'.dev-dependencies]
tikv-jemallocator = "0.6.0"
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(loom)'] }
[[bench]]
name = "compare"
harness = false

View file

@ -21,13 +21,9 @@ macro_rules! setup_accumulator_impl {
use salsa::plumbing as $zalsa;
use salsa::plumbing::accumulator as $zalsa_struct;
// Suppress the lint against `cfg(loom)`.
#[allow(unexpected_cfgs)]
fn $ingredient(zalsa: &$zalsa::Zalsa) -> &$zalsa_struct::IngredientImpl<$Struct> {
$zalsa::__maybe_lazy_static! {
static $CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Struct>> =
$zalsa::IngredientCache::new();
}
static $CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Struct>> =
$zalsa::IngredientCache::new();
$CACHE.get_or_create(zalsa, || {
zalsa.add_or_lookup_jar_by_type::<$zalsa_struct::JarImpl<$Struct>>()

View file

@ -95,13 +95,9 @@ macro_rules! setup_input_struct {
Self::ingredient_(db.zalsa())
}
// Suppress the lint against `cfg(loom)`.
#[allow(unexpected_cfgs)]
fn ingredient_(zalsa: &$zalsa::Zalsa) -> &$zalsa_struct::IngredientImpl<Self> {
zalsa_::__maybe_lazy_static! {
static CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
}
static CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
CACHE.get_or_create(zalsa, || {
zalsa.add_or_lookup_jar_by_type::<$zalsa_struct::JarImpl<$Configuration>>()

View file

@ -131,16 +131,12 @@ macro_rules! setup_interned_struct {
}
impl $Configuration {
// Suppress the lint against `cfg(loom)`.
#[allow(unexpected_cfgs)]
pub fn ingredient<Db>(db: &Db) -> &$zalsa_struct::IngredientImpl<Self>
where
Db: ?Sized + $zalsa::Database,
{
$zalsa::__maybe_lazy_static! {
static CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
}
static CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
let zalsa = db.zalsa();
CACHE.get_or_create(zalsa, || {

View file

@ -74,8 +74,6 @@ macro_rules! setup_tracked_fn {
) => {
// Suppress this clippy lint because we sometimes require `'db` where the ordinary Rust rules would not.
#[allow(clippy::needless_lifetimes)]
// Suppress the lint against `cfg(loom)`.
#[allow(unexpected_cfgs)]
$(#[$attr])*
$vis fn $fn_name<$db_lt>(
$db: &$db_lt dyn $Db,
@ -85,10 +83,8 @@ macro_rules! setup_tracked_fn {
struct $Configuration;
$zalsa::__maybe_lazy_static! {
static $FN_CACHE: $zalsa::IngredientCache<$zalsa::function::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
}
static $FN_CACHE: $zalsa::IngredientCache<$zalsa::function::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
$zalsa::macro_if! {
if $needs_interner {
@ -98,10 +94,8 @@ macro_rules! setup_tracked_fn {
std::marker::PhantomData<&$db_lt $zalsa::interned::Value<$Configuration>>,
);
$zalsa::__maybe_lazy_static! {
static $INTERN_CACHE: $zalsa::IngredientCache<$zalsa::interned::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
}
static $INTERN_CACHE: $zalsa::IngredientCache<$zalsa::interned::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
impl $zalsa::SalsaStructInDb for $InternedData<'_> {
type MemoIngredientMap = $zalsa::MemoIngredientSingletonIndex;

View file

@ -173,13 +173,9 @@ macro_rules! setup_tracked_struct {
Self::ingredient_(db.zalsa())
}
// Suppress the lint against `cfg(loom)`.
#[allow(unexpected_cfgs)]
fn ingredient_(zalsa: &$zalsa::Zalsa) -> &$zalsa_struct::IngredientImpl<Self> {
$zalsa::__maybe_lazy_static! {
static CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
}
static CACHE: $zalsa::IngredientCache<$zalsa_struct::IngredientImpl<$Configuration>> =
$zalsa::IngredientCache::new();
CACHE.get_or_create(zalsa, || {
zalsa.add_or_lookup_jar_by_type::<$zalsa_struct::JarImpl<$Configuration>>()

View file

@ -1,10 +1,10 @@
test:
cargo test --workspace --all-features --all-targets --no-fail-fast
cargo test --workspace --all-targets --no-fail-fast
miri:
cargo +nightly miri test --no-fail-fast --all-features
cargo +nightly miri test --no-fail-fast
loom:
RUSTFLAGS="--cfg loom" cargo check --workspace --features loom
shuttle:
cargo nextest run --features shuttle --test parallel
all: test miri

View file

@ -10,8 +10,8 @@ use accumulated::{Accumulated, AnyAccumulated};
use crate::cycle::CycleHeads;
use crate::function::VerifyResult;
use crate::ingredient::{Ingredient, Jar};
use crate::loom::sync::Arc;
use crate::plumbing::{IngredientIndices, ZalsaLocal};
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::zalsa::{IngredientIndex, Zalsa};
use crate::{Database, Id, Revision};

View file

@ -4,7 +4,7 @@ use rustc_hash::FxHashMap;
use crate::accumulator::accumulated::Accumulated;
use crate::accumulator::{Accumulator, AnyAccumulated};
use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::IngredientIndex;
#[derive(Default)]

View file

@ -8,8 +8,8 @@ use crate::cycle::CycleHeads;
use crate::durability::Durability;
use crate::hash::FxIndexSet;
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::atomic::AtomicBool;
use crate::runtime::Stamp;
use crate::sync::atomic::AtomicBool;
use crate::tracked_struct::{Disambiguator, DisambiguatorMap, IdentityHash, IdentityMap};
use crate::zalsa_local::{QueryEdge, QueryEdges, QueryOrigin, QueryRevisions};
use crate::{Accumulator, IngredientIndex, Revision};

View file

@ -1,17 +1,17 @@
use std::cell::Cell;
use std::ptr::NonNull;
use crate::loom::cell::Cell;
use crate::Database;
#[cfg(loom)]
crate::loom::thread_local! {
#[cfg(feature = "shuttle")]
crate::sync::thread_local! {
/// The thread-local state salsa requires for a given thread
static ATTACHED: Attached = Attached::new();
}
// loom's `thread_local` macro does not support const-initialization.
#[cfg(not(loom))]
crate::loom::thread_local! {
// shuttle's `thread_local` macro does not support const-initialization.
#[cfg(not(feature = "shuttle"))]
crate::sync::thread_local! {
/// The thread-local state salsa requires for a given thread
static ATTACHED: Attached = const { Attached::new() }
}
@ -28,14 +28,6 @@ struct Attached {
}
impl Attached {
#[cfg(loom)]
fn new() -> Self {
Self {
database: Cell::new(None),
}
}
#[cfg(not(loom))]
const fn new() -> Self {
Self {
database: Cell::new(None),
@ -58,9 +50,7 @@ impl Attached {
Some(current_db) => {
let new_db = NonNull::from(db);
if !std::ptr::addr_eq(current_db.as_ptr(), new_db.as_ptr()) {
panic!(
"Cannot change database mid-query. current: {current_db:?}, new: {new_db:?}",
);
panic!("Cannot change database mid-query. current: {current_db:?}, new: {new_db:?}");
}
Self { state: None }
}

View file

@ -55,6 +55,7 @@ use std::panic;
use thin_vec::{thin_vec, ThinVec};
use crate::key::DatabaseKeyIndex;
use crate::sync::OnceLock;
/// The maximum number of times we'll fixpoint-iterate before panicking.
///
@ -258,13 +259,10 @@ impl From<CycleHead> for CycleHeads {
}
}
#[cfg(not(loom))]
pub(crate) static EMPTY_CYCLE_HEADS: std::sync::LazyLock<CycleHeads> =
std::sync::LazyLock::new(|| CycleHeads(ThinVec::new()));
#[cfg(loom)]
loom::lazy_static! {
pub(crate) static ref EMPTY_CYCLE_HEADS: CycleHeads = CycleHeads(ThinVec::new());
#[inline]
pub(crate) fn empty_cycle_heads() -> &'static CycleHeads {
static EMPTY_CYCLE_HEADS: OnceLock<CycleHeads> = OnceLock::new();
EMPTY_CYCLE_HEADS.get_or_init(|| CycleHeads(ThinVec::new()))
}
#[derive(Debug, PartialEq, Eq)]

View file

@ -1,5 +1,5 @@
use crate::key::DatabaseKeyIndex;
use crate::loom::thread::{self, ThreadId};
use crate::sync::thread::{self, ThreadId};
use crate::Revision;
/// The `Event` struct identifies various notable things that can

View file

@ -10,9 +10,9 @@ use crate::function::delete::DeletedEntries;
use crate::function::sync::{ClaimResult, SyncTable};
use crate::ingredient::Ingredient;
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::Arc;
use crate::plumbing::MemoIngredientMap;
use crate::salsa_struct::SalsaStructInDb;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::table::Table;
use crate::views::DatabaseDownCaster;

View file

@ -1,7 +1,7 @@
use crate::cycle::{CycleRecoveryStrategy, MAX_ITERATIONS};
use crate::function::memo::Memo;
use crate::function::{Configuration, IngredientImpl};
use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase};
use crate::zalsa_local::{ActiveQueryGuard, QueryRevisions};
use crate::{Event, EventKind, Id, Revision};

View file

@ -2,7 +2,6 @@ use crate::cycle::{CycleHeads, CycleRecoveryStrategy, UnexpectedCycle};
use crate::function::memo::Memo;
use crate::function::sync::ClaimResult;
use crate::function::{Configuration, IngredientImpl, VerifyResult};
use crate::loom::sync::AtomicMut;
use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase};
use crate::zalsa_local::QueryRevisions;
use crate::Id;
@ -163,7 +162,7 @@ where
let mut revisions = active_query.pop();
revisions.cycle_heads = CycleHeads::initial(database_key_index);
// We need this for `cycle_heads()` to work. We will unset this in the outer `execute()`.
revisions.verified_final.write_mut(false);
*revisions.verified_final.get_mut() = false;
Some(self.insert_memo(
zalsa,
id,

View file

@ -1,7 +1,7 @@
use std::num::NonZeroUsize;
use crate::hash::FxLinkedHashSet;
use crate::loom::sync::Mutex;
use crate::sync::Mutex;
use crate::Id;
pub(super) struct Lru {

View file

@ -4,8 +4,8 @@ use crate::function::memo::Memo;
use crate::function::sync::ClaimResult;
use crate::function::{Configuration, IngredientImpl};
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::atomic::Ordering;
use crate::plumbing::ZalsaLocal;
use crate::sync::atomic::Ordering;
use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase};
use crate::zalsa_local::{QueryEdge, QueryOrigin};
use crate::{AsDynDatabase as _, Id, Revision};

View file

@ -3,11 +3,11 @@ use std::fmt::{Debug, Formatter};
use std::mem::transmute;
use std::ptr::NonNull;
use crate::cycle::{CycleHeadKind, CycleHeads, EMPTY_CYCLE_HEADS};
use crate::cycle::{empty_cycle_heads, CycleHeadKind, CycleHeads};
use crate::function::{Configuration, IngredientImpl};
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::atomic::Ordering;
use crate::revision::AtomicRevision;
use crate::sync::atomic::Ordering;
use crate::table::memo::MemoTableWithTypesMut;
use crate::zalsa::{MemoIngredientIndex, Zalsa};
use crate::zalsa_local::{QueryOrigin, QueryRevisions};
@ -98,7 +98,7 @@ pub struct Memo<V> {
}
// Memo's are stored a lot, make sure their size is doesn't randomly increase.
#[cfg(not(loom))]
#[cfg(not(feature = "shuttle"))]
const _: [(); std::mem::size_of::<Memo<std::num::NonZeroUsize>>()] =
[(); std::mem::size_of::<[usize; 13]>()];
@ -196,7 +196,7 @@ impl<V> Memo<V> {
if self.may_be_provisional() {
&self.revisions.cycle_heads
} else {
&EMPTY_CYCLE_HEADS
empty_cycle_heads()
}
}

View file

@ -1,8 +1,8 @@
use crate::accumulator::accumulated_map::InputAccumulatedValues;
use crate::function::memo::Memo;
use crate::function::{Configuration, IngredientImpl};
use crate::loom::sync::atomic::AtomicBool;
use crate::revision::AtomicRevision;
use crate::sync::atomic::AtomicBool;
use crate::tracked_struct::TrackedStructInDb;
use crate::zalsa::{Zalsa, ZalsaDatabase};
use crate::zalsa_local::{QueryOrigin, QueryRevisions};

View file

@ -1,9 +1,9 @@
use rustc_hash::FxHashMap;
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::Mutex;
use crate::loom::thread::{self, ThreadId};
use crate::runtime::{BlockResult, WaitResult};
use crate::sync::thread::{self, ThreadId};
use crate::sync::Mutex;
use crate::zalsa::Zalsa;
use crate::{Id, IngredientIndex};
@ -99,7 +99,7 @@ impl ClaimGuard<'_> {
if anyone_waiting {
self.zalsa.runtime().unblock_queries_blocked_on(
DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index),
if std::thread::panicking() {
if thread::panicking() {
WaitResult::Panicked
} else {
WaitResult::Completed

View file

@ -2,7 +2,6 @@ use std::hash::{BuildHasher, Hash};
pub(crate) type FxHasher = std::hash::BuildHasherDefault<rustc_hash::FxHasher>;
pub(crate) type FxIndexSet<K> = indexmap::IndexSet<K, FxHasher>;
pub(crate) type FxDashMap<K, V> = dashmap::DashMap<K, V, FxHasher>;
pub(crate) type FxLinkedHashSet<K> = hashlink::LinkedHashSet<K, FxHasher>;
pub(crate) type FxHashSet<K> = std::collections::HashSet<K, FxHasher>;

View file

@ -4,8 +4,8 @@ use std::fmt;
use crate::accumulator::accumulated_map::{AccumulatedMap, InputAccumulatedValues};
use crate::cycle::{CycleHeadKind, CycleHeads, CycleRecoveryStrategy};
use crate::function::VerifyResult;
use crate::loom::sync::Arc;
use crate::plumbing::IngredientIndices;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::table::Table;
use crate::zalsa::{transmute_data_mut_ptr, transmute_data_ptr, IngredientIndex, Zalsa};

View file

@ -1,6 +1,5 @@
use std::any::{Any, TypeId};
use std::fmt;
use std::mem::MaybeUninit;
use std::ops::IndexMut;
pub mod input_field;
@ -15,9 +14,8 @@ use crate::id::{AsId, FromId, FromIdWithDb};
use crate::ingredient::Ingredient;
use crate::input::singleton::{Singleton, SingletonChoice};
use crate::key::DatabaseKeyIndex;
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::Arc;
use crate::plumbing::{Jar, Stamp};
use crate::sync::Arc;
use crate::table::memo::{MemoTable, MemoTableTypes};
use crate::table::{Slot, Table};
use crate::zalsa::{IngredientIndex, Zalsa};
@ -94,7 +92,7 @@ impl<C: Configuration> IngredientImpl<C> {
zalsa.table().get(id)
}
fn data_raw(table: &Table, id: Id) -> &UnsafeCell<MaybeUninit<Value<C>>> {
fn data_raw(table: &Table, id: Id) -> *mut Value<C> {
table.get_raw(id)
}
@ -135,21 +133,21 @@ impl<C: Configuration> IngredientImpl<C> {
) -> R {
let id: Id = id.as_id();
Self::data_raw(runtime.table(), id).get_mut().with(|r| {
// SAFETY: We hold `&mut` on the runtime so no `&`-references can be active.
// Also, we don't access any other data from the table while `r` is active.
let r = unsafe { (*r).assume_init_mut() };
let data_raw = Self::data_raw(runtime.table(), id);
let stamp = &mut r.stamps[field_index];
// SAFETY: We hold `&mut` on the runtime so no `&`-references can be active.
// Also, we don't access any other data from the table while `r` is active.
let data = unsafe { &mut *data_raw };
if stamp.durability != Durability::MIN {
runtime.report_tracked_write(stamp.durability);
}
let stamp = &mut data.stamps[field_index];
stamp.durability = durability.unwrap_or(stamp.durability);
stamp.changed_at = runtime.current_revision();
setter(&mut r.fields)
})
if stamp.durability != Durability::MIN {
runtime.report_tracked_write(stamp.durability);
}
stamp.durability = durability.unwrap_or(stamp.durability);
stamp.changed_at = runtime.current_revision();
setter(&mut data.fields)
}
/// Get the singleton input previously created (if any).

View file

@ -5,7 +5,7 @@ use crate::cycle::CycleHeads;
use crate::function::VerifyResult;
use crate::ingredient::Ingredient;
use crate::input::{Configuration, IngredientImpl, Value};
use crate::loom::sync::Arc;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::zalsa::IngredientIndex;
use crate::{Database, Id, Revision};

View file

@ -1,4 +1,4 @@
use crate::loom::sync::atomic::{AtomicU64, Ordering};
use crate::sync::atomic::{AtomicU64, Ordering};
use crate::Id;
mod sealed {

View file

@ -1,6 +1,7 @@
#![allow(clippy::undocumented_unsafe_blocks)] // TODO(#697) document safety
use std::any::TypeId;
use std::cell::Cell;
use std::fmt;
use std::hash::{BuildHasher, Hash, Hasher};
use std::marker::PhantomData;
@ -11,14 +12,13 @@ use dashmap::SharedValue;
use crate::cycle::CycleHeads;
use crate::durability::Durability;
use crate::function::VerifyResult;
use crate::hash::FxDashMap;
use crate::id::{AsId, FromId};
use crate::ingredient::Ingredient;
use crate::loom::cell::Cell;
use crate::loom::sync::atomic::{AtomicU8, Ordering};
use crate::loom::sync::Arc;
use crate::plumbing::{IngredientIndices, Jar};
use crate::revision::AtomicRevision;
use crate::sync::atomic::{AtomicU8, Ordering};
use crate::sync::Arc;
use crate::sync::FxDashMap;
use crate::table::memo::{MemoTable, MemoTableTypes};
use crate::table::Slot;
use crate::zalsa::{IngredientIndex, Zalsa};

View file

@ -17,7 +17,6 @@ mod ingredient;
mod input;
mod interned;
mod key;
mod loom;
mod memo_ingredient_indices;
mod nonce;
#[cfg(feature = "rayon")]
@ -27,6 +26,7 @@ mod revision;
mod runtime;
mod salsa_struct;
mod storage;
mod sync;
mod table;
mod tracked_struct;
mod update;
@ -80,7 +80,6 @@ pub mod plumbing {
setup_tracked_struct, unexpected_cycle_initial, unexpected_cycle_recovery,
};
pub use crate::__maybe_lazy_static;
pub use crate::accumulator::Accumulator;
pub use crate::attach::{attach, with_attached_database};
pub use crate::cycle::{CycleRecoveryAction, CycleRecoveryStrategy};

View file

@ -1,274 +0,0 @@
#[cfg(loom)]
pub use loom::{cell, thread, thread_local};
/// A helper macro to work around the fact that most loom types are not `const` constructable.
#[doc(hidden)]
#[macro_export]
#[cfg(loom)]
macro_rules! __maybe_lazy_static {
(static $name:ident: $t:ty = $init:expr $(;)?) => {
loom::lazy_static! { static ref $name: $t = $init; }
};
}
/// A helper macro to work around the fact that most loom types are not `const` constructable.
#[doc(hidden)]
#[macro_export]
#[cfg(not(loom))]
macro_rules! __maybe_lazy_static {
(static $name:ident: $t:ty = $init:expr $(;)?) => {
static $name: $t = $init;
};
}
pub(crate) use crate::__maybe_lazy_static as maybe_lazy_static;
/// A polyfill for `Atomic*::get_mut`, which loom does not support.
pub trait AtomicMut<T> {
fn read_mut(&mut self) -> T;
fn write_mut(&mut self, value: T);
}
#[cfg(loom)]
pub mod sync {
pub use super::AtomicMut;
pub use loom::sync::*;
/// A wrapper around loom's `Mutex` to mirror parking-lot's API.
#[derive(Default, Debug)]
pub struct Mutex<T>(loom::sync::Mutex<T>);
impl<T> Mutex<T> {
pub fn new(value: T) -> Mutex<T> {
Mutex(loom::sync::Mutex::new(value))
}
pub fn lock(&self) -> MutexGuard<'_, T> {
self.0.lock().unwrap()
}
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut().unwrap()
}
}
/// A wrapper around loom's `RwLock` to mirror parking-lot's API.
#[derive(Default, Debug)]
pub struct RwLock<T>(loom::sync::RwLock<T>);
impl<T> RwLock<T> {
pub fn read(&self) -> RwLockReadGuard<'_, T> {
self.0.read().unwrap()
}
pub fn write(&self) -> RwLockWriteGuard<'_, T> {
self.0.write().unwrap()
}
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut().unwrap()
}
}
/// A wrapper around loom's `Condvar` to mirror parking-lot's API.
#[derive(Default, Debug)]
pub struct Condvar(loom::sync::Condvar);
impl Condvar {
// We cannot match parking-lot identically because loom's version takes ownership of the `MutexGuard`.
pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
self.0.wait(guard).unwrap()
}
pub fn notify_one(&self) {
self.0.notify_one();
}
pub fn notify_all(&self) {
self.0.notify_all();
}
}
use loom::cell::UnsafeCell;
use std::mem::MaybeUninit;
/// A polyfill for `std::sync::OnceLock`.
pub struct OnceLock<T>(Mutex<bool>, UnsafeCell<MaybeUninit<T>>);
impl<T> OnceLock<T> {
pub fn new() -> OnceLock<T> {
OnceLock(Mutex::new(false), UnsafeCell::new(MaybeUninit::uninit()))
}
pub fn get(&self) -> Option<&T> {
let initialized = self.0.lock();
if *initialized {
// SAFETY: The value is initialized and write-once.
Some(self.1.with(|ptr| unsafe { (*ptr).assume_init_ref() }))
} else {
None
}
}
pub fn set(&self, value: T) -> Result<(), T> {
let mut initialized = self.0.lock();
if *initialized {
Err(value)
} else {
self.1.with_mut(|ptr| {
// SAFETY: We hold the lock.
unsafe { ptr.write(MaybeUninit::new(value)) }
});
*initialized = true;
Ok(())
}
}
}
impl<T> From<T> for OnceLock<T> {
fn from(value: T) -> OnceLock<T> {
OnceLock(Mutex::new(true), UnsafeCell::new(MaybeUninit::new(value)))
}
}
// SAFETY: Mirroring `std::sync::OnceLock`.
unsafe impl<T: Send> Send for OnceLock<T> {}
// SAFETY: Mirroring `std::sync::OnceLock`.
unsafe impl<T: Sync + Send> Sync for OnceLock<T> {}
/// Extend `Atomic*` with mutable accessors.
macro_rules! impl_loom_atomic_mut {
($($atomic_ty:ident $(<$generic:ident>)? => $ty:ty),*) => {$(
impl $(<$generic>)? super::AtomicMut<$ty> for atomic::$atomic_ty $(<$generic>)? {
fn read_mut(&mut self) -> $ty {
self.load(atomic::Ordering::Relaxed)
}
fn write_mut(&mut self, value: $ty) {
self.store(value, atomic::Ordering::Relaxed)
}
}
)*};
}
impl_loom_atomic_mut! { AtomicBool => bool, AtomicUsize => usize, AtomicPtr<T> => *mut T }
}
#[cfg(not(loom))]
pub use std::{thread, thread_local};
#[cfg(not(loom))]
pub mod cell {
pub use std::cell::*;
pub(crate) struct UnsafeCell<T>(core::cell::UnsafeCell<T>);
// this is not derived because it confuses rust-analyzer ... https://github.com/rust-lang/rust-analyzer/issues/19755
impl<T: std::fmt::Debug> std::fmt::Debug for UnsafeCell<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("UnsafeCell").field(&self.0).finish()
}
}
impl<T> UnsafeCell<T> {
pub const fn new(data: T) -> UnsafeCell<T> {
UnsafeCell(core::cell::UnsafeCell::new(data))
}
#[inline(always)]
pub fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(*const T) -> R,
{
f(self.0.get())
}
#[inline(always)]
pub fn with_mut<F, R>(&self, f: F) -> R
where
F: FnOnce(*mut T) -> R,
{
f(self.0.get())
}
#[inline(always)]
pub(crate) fn get_mut(&self) -> MutPtr<T> {
MutPtr(self.0.get())
}
}
#[derive(Debug)]
pub(crate) struct MutPtr<T: ?Sized>(*mut T);
impl<T: ?Sized> MutPtr<T> {
#[inline(always)]
pub fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(*mut T) -> R,
{
f(self.0)
}
}
}
#[cfg(not(loom))]
pub mod sync {
pub use super::AtomicMut;
pub use parking_lot::{Mutex, MutexGuard, RwLock};
pub use std::sync::*;
pub mod atomic {
pub use portable_atomic::AtomicU64;
pub use std::sync::atomic::*;
}
/// A wrapper around parking-lot's `Condvar` to mirror loom's API.
pub struct Condvar(parking_lot::Condvar);
// this is not derived because it confuses rust-analyzer ... https://github.com/rust-lang/rust-analyzer/issues/19755
#[allow(clippy::derivable_impls)]
impl Default for Condvar {
fn default() -> Self {
Self(Default::default())
}
}
// this is not derived because it confuses rust-analyzer ... https://github.com/rust-lang/rust-analyzer/issues/19755
impl std::fmt::Debug for Condvar {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("Condvar").field(&self.0).finish()
}
}
impl Condvar {
pub fn wait<'a, T>(&self, mut guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
self.0.wait(&mut guard);
guard
}
pub fn notify_one(&self) {
self.0.notify_one();
}
pub fn notify_all(&self) {
self.0.notify_all();
}
}
}
/// Extend `Atomic*` with mutable accessors.
macro_rules! impl_std_atomic_mut {
($($atomic_ty:ident $(<$generic:ident>)? => $ty:ty),*) => {$(
#[cfg(not(loom))]
impl $(<$generic>)? AtomicMut<$ty> for sync::atomic::$atomic_ty $(<$generic>)? {
fn read_mut(&mut self) -> $ty {
*self.get_mut()
}
fn write_mut(&mut self, value: $ty) {
*self.get_mut() = value;
}
}
)*};
}
impl_std_atomic_mut! { AtomicBool => bool, AtomicUsize => usize, AtomicPtr<T> => *mut T }

View file

@ -1,4 +1,4 @@
use crate::loom::sync::Arc;
use crate::sync::Arc;
use crate::table::memo::{MemoEntryType, MemoTableTypes};
use crate::zalsa::{MemoIngredientIndex, Zalsa};
use crate::{Id, IngredientIndex};

View file

@ -1,4 +1,4 @@
use crate::loom::sync::atomic::{AtomicU32, Ordering};
use crate::sync::atomic::{AtomicU32, Ordering};
use std::marker::PhantomData;
use std::num::NonZeroU32;
@ -15,7 +15,6 @@ pub(crate) struct NonceGenerator<T> {
pub struct Nonce<T>(NonZeroU32, PhantomData<T>);
impl<T> NonceGenerator<T> {
#[cfg(not(loom))]
pub(crate) const fn new() -> Self {
Self {
// start at 1 so we can detect rollover more easily
@ -24,15 +23,6 @@ impl<T> NonceGenerator<T> {
}
}
#[cfg(loom)]
pub(crate) fn new() -> Self {
Self {
// start at 1 so we can detect rollover more easily
value: AtomicU32::new(1),
phantom: PhantomData,
}
}
pub(crate) fn nonce(&self) -> Nonce<T> {
let value = self.value.fetch_add(1, Ordering::Relaxed);

View file

@ -1,6 +1,6 @@
use std::num::NonZeroUsize;
use crate::loom::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::atomic::{AtomicUsize, Ordering};
/// Value of the initial revision, as a u64. We don't use 0
/// because we want to use a `NonZeroUsize`.

View file

@ -1,9 +1,9 @@
use self::dependency_graph::DependencyGraph;
use crate::durability::Durability;
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::loom::sync::{AtomicMut, Mutex};
use crate::loom::thread::{self, ThreadId};
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::thread::{self, ThreadId};
use crate::sync::Mutex;
use crate::table::Table;
use crate::{Cancelled, Event, EventKind, Revision};
@ -129,7 +129,7 @@ impl Runtime {
}
pub(crate) fn reset_cancellation_flag(&mut self) {
self.revision_canceled.write_mut(false);
*self.revision_canceled.get_mut() = false;
}
/// Returns the [`Table`] used to store the value of salsa structs

View file

@ -4,10 +4,10 @@ use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::MutexGuard;
use crate::loom::thread::ThreadId;
use crate::runtime::dependency_graph::edge::EdgeCondvar;
use crate::runtime::WaitResult;
use crate::sync::thread::ThreadId;
use crate::sync::MutexGuard;
#[derive(Debug, Default)]
pub(super) struct DependencyGraph {
@ -141,8 +141,8 @@ impl DependencyGraph {
}
mod edge {
use crate::loom::sync::{Condvar, MutexGuard};
use crate::loom::thread::ThreadId;
use crate::sync::thread::ThreadId;
use crate::sync::{Condvar, MutexGuard};
use std::pin::Pin;

View file

@ -2,7 +2,7 @@
use std::marker::PhantomData;
use std::panic::RefUnwindSafe;
use crate::loom::sync::{Arc, Condvar, Mutex};
use crate::sync::{Arc, Condvar, Mutex};
use crate::zalsa::{Zalsa, ZalsaDatabase};
use crate::zalsa_local::{self, ZalsaLocal};
use crate::{Database, Event, EventKind};

199
src/sync.rs Normal file
View file

@ -0,0 +1,199 @@
pub use shim::*;
#[cfg(feature = "shuttle")]
pub mod shim {
pub use shuttle::sync::*;
pub use shuttle::{thread, thread_local};
/// A polyfill for `dashmap::DashMap`.
pub struct FxDashMap<K, V>(RwLock<HashTable<K, V>>, crate::hash::FxHasher);
type HashTable<K, V> = hashbrown_14::raw::RawTable<(K, dashmap::SharedValue<V>)>;
impl<K, V> Default for FxDashMap<K, V> {
fn default() -> FxDashMap<K, V> {
FxDashMap(RwLock::default(), crate::hash::FxHasher::default())
}
}
impl<K, V> FxDashMap<K, V> {
pub fn shards(&self) -> &[RwLock<HashTable<K, V>>] {
std::slice::from_ref(&self.0)
}
pub fn determine_shard(&self, _hash: usize) -> usize {
0
}
pub fn hasher(&self) -> &crate::hash::FxHasher {
&self.1
}
pub fn clear(&self) {
self.0.write().clear();
}
}
/// A wrapper around shuttle's `Mutex` to mirror parking-lot's API.
#[derive(Default, Debug)]
pub struct Mutex<T>(shuttle::sync::Mutex<T>);
impl<T> Mutex<T> {
pub const fn new(value: T) -> Mutex<T> {
Mutex(shuttle::sync::Mutex::new(value))
}
pub fn lock(&self) -> MutexGuard<'_, T> {
self.0.lock().unwrap()
}
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut().unwrap()
}
}
/// A wrapper around shuttle's `RwLock` to mirror parking-lot's API.
#[derive(Default, Debug)]
pub struct RwLock<T>(shuttle::sync::RwLock<T>);
impl<T> RwLock<T> {
pub fn read(&self) -> RwLockReadGuard<'_, T> {
self.0.read().unwrap()
}
pub fn write(&self) -> RwLockWriteGuard<'_, T> {
self.0.write().unwrap()
}
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut().unwrap()
}
}
/// A wrapper around shuttle's `Condvar` to mirror parking-lot's API.
#[derive(Default, Debug)]
pub struct Condvar(shuttle::sync::Condvar);
impl Condvar {
// We cannot match parking-lot identically because shuttle's version takes ownership of the `MutexGuard`.
pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
self.0.wait(guard).unwrap()
}
pub fn notify_one(&self) {
self.0.notify_one();
}
pub fn notify_all(&self) {
self.0.notify_all();
}
}
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
/// A polyfill for `std::sync::OnceLock`.
///
/// The `Mutex<bool>` is the "initialized" flag and also serializes writers:
/// once it is set to `true`, the `UnsafeCell` contents are initialized and
/// never written again, so `get` can hand out shared references safely.
pub struct OnceLock<T>(Mutex<bool>, UnsafeCell<MaybeUninit<T>>);

impl<T> OnceLock<T> {
    /// Creates a new, empty cell.
    pub const fn new() -> OnceLock<T> {
        OnceLock(Mutex::new(false), UnsafeCell::new(MaybeUninit::uninit()))
    }

    /// Returns a reference to the value, or `None` if it is not yet initialized.
    pub fn get(&self) -> Option<&T> {
        let initialized = self.0.lock();
        if *initialized {
            // SAFETY: The value is initialized and write-once.
            Some(unsafe { (*self.1.get()).assume_init_ref() })
        } else {
            None
        }
    }

    /// Returns the value, initializing it with `f` if the cell was empty.
    pub fn get_or_init<F>(&self, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        let _ = self.set_with(f);
        self.get().unwrap()
    }

    /// Stores `value` if the cell was empty; otherwise returns it back in `Err`.
    pub fn set(&self, value: T) -> Result<(), T> {
        self.set_with(|| value).map_err(|f| f())
    }

    /// Initializes the cell with `f` if it was empty; on failure the closure
    /// is handed back unconsumed so `set` can recover its captured value.
    fn set_with<F>(&self, f: F) -> Result<(), F>
    where
        F: FnOnce() -> T,
    {
        let mut initialized = self.0.lock();
        if *initialized {
            return Err(f);
        }
        // SAFETY: We hold the lock.
        unsafe { self.1.get().write(MaybeUninit::new(f())) }
        *initialized = true;
        Ok(())
    }
}

impl<T> Drop for OnceLock<T> {
    fn drop(&mut self) {
        // Previously the initialized value was leaked on drop because
        // `MaybeUninit` never runs destructors; mirror `std::sync::OnceLock`
        // by dropping the contents if the cell was initialized.
        if *self.0.get_mut() {
            // SAFETY: The flag is only set after the value is fully written,
            // and `&mut self` guarantees there are no outstanding references.
            unsafe { self.1.get_mut().assume_init_drop() }
        }
    }
}

impl<T> From<T> for OnceLock<T> {
    fn from(value: T) -> OnceLock<T> {
        OnceLock(Mutex::new(true), UnsafeCell::new(MaybeUninit::new(value)))
    }
}

// SAFETY: Mirroring `std::sync::OnceLock`.
unsafe impl<T: Send> Send for OnceLock<T> {}
// SAFETY: Mirroring `std::sync::OnceLock`.
unsafe impl<T: Sync + Send> Sync for OnceLock<T> {}
}
#[cfg(not(feature = "shuttle"))]
pub mod shim {
pub use parking_lot::{Mutex, MutexGuard, RwLock};
pub use std::sync::*;
pub use std::{thread, thread_local};
pub(crate) type FxDashMap<K, V> = dashmap::DashMap<K, V, crate::hash::FxHasher>;
pub mod atomic {
pub use portable_atomic::AtomicU64;
pub use std::sync::atomic::*;
}
/// A wrapper around parking-lot's `Condvar` to mirror shuttle's API.
pub struct Condvar(parking_lot::Condvar);

// this is not derived because it confuses rust-analyzer ... https://github.com/rust-lang/rust-analyzer/issues/19755
#[allow(clippy::derivable_impls)]
impl Default for Condvar {
    fn default() -> Self {
        Self(Default::default())
    }
}

// this is not derived because it confuses rust-analyzer ... https://github.com/rust-lang/rust-analyzer/issues/19755
impl std::fmt::Debug for Condvar {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("Condvar").field(&self.0).finish()
    }
}

impl Condvar {
    /// Blocks until notified, releasing `guard` while waiting and
    /// re-acquiring it before returning.
    ///
    /// The guard is taken by value (and returned) to match shuttle's API;
    /// parking-lot itself waits on an `&mut` guard.
    pub fn wait<'a, T>(&self, mut guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
        self.0.wait(&mut guard);
        guard
    }

    /// Wakes up one thread blocked on this condition variable.
    pub fn notify_one(&self) {
        self.0.notify_one();
    }

    /// Wakes up all threads blocked on this condition variable.
    pub fn notify_all(&self) {
        self.0.notify_all();
    }
}
}

View file

@ -1,5 +1,6 @@
use std::alloc::Layout;
use std::any::{Any, TypeId};
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
@ -8,9 +9,8 @@ use std::slice;
use memo::MemoTable;
use rustc_hash::FxHashMap;
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::{AtomicUsize, Ordering};
use crate::loom::sync::{Arc, AtomicMut, Mutex};
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::{Arc, Mutex};
use crate::table::memo::{MemoTableTypes, MemoTableWithTypes, MemoTableWithTypesMut};
use crate::{Id, IngredientIndex, Revision};
@ -71,11 +71,9 @@ impl SlotVTable {
unsafe {
let data = Box::from_raw(data.cast::<PageData<T>>());
for i in 0..initialized {
data[i].with_mut(|item| {
let item = item.cast::<T>();
memo_types.attach_memos_mut((*item).memos_mut()).drop();
ptr::drop_in_place(item);
});
let item = data[i].get().cast::<T>();
memo_types.attach_memos_mut((*item).memos_mut()).drop();
ptr::drop_in_place(item);
}
},
layout: Layout::new::<T>(),
@ -192,11 +190,10 @@ impl Table {
/// # Safety
///
/// See [`Page::get_raw`][].
// TODO: This could return an `&UnsafeCell<T>` directly, but loom's `UnsafeCell` is not `repr(C)`
pub(crate) fn get_raw<T: Slot>(&self, id: Id) -> &UnsafeCell<MaybeUninit<T>> {
pub(crate) fn get_raw<T: Slot>(&self, id: Id) -> *mut T {
let (page, slot) = split_id(id);
let page_ref = self.page::<T>(page);
&page_ref.page_data()[slot.0]
page_ref.page_data()[slot.0].get().cast::<T>()
}
/// Gets a reference to the page which has slots of type `T`
@ -312,10 +309,13 @@ impl<'p, T: Slot> PageView<'p, T> {
// Initialize entry `index`
let id = make_id(page, SlotIndex::new(index));
let data = self.0.data.cast::<PageDataEntry<T>>();
// SAFETY: `index` is also guaranteed to be in bounds as per the check above.
let entry = unsafe { &*data.as_ptr().add(index) };
// SAFETY: We acquired the allocation lock, so we have unique access to the UnsafeCell
// interior.
// `index` is also guaranteed to be in bounds as per the check above.
unsafe { (*data.as_ptr().add(index)).with_mut(|ptr| (*ptr).write(value(id))) };
// interior
unsafe { (*entry.get()).write(value(id)) };
// Update the length (this must be done after initialization as otherwise an uninitialized
// read could occur!)
@ -328,13 +328,22 @@ impl<'p, T: Slot> PageView<'p, T> {
impl Page {
#[inline]
fn new<T: Slot>(ingredient: IngredientIndex, memo_types: Arc<MemoTableTypes>) -> Self {
#[cfg(not(loom))]
#[cfg(not(feature = "shuttle"))]
let data: Box<PageData<T>> =
Box::new([const { UnsafeCell::new(MaybeUninit::uninit()) }; PAGE_LEN]);
#[cfg(loom)]
let data: Box<PageData<T>> =
Box::new([const { MaybeUninit::uninit() }; PAGE_LEN].map(UnsafeCell::new));
#[cfg(feature = "shuttle")]
let data = {
// Avoid stack overflows when using larger shuttle types.
let data = (0..PAGE_LEN)
.map(|_| UnsafeCell::new(MaybeUninit::uninit()))
.collect::<Box<[PageDataEntry<T>]>>();
let data: *mut [PageDataEntry<T>] = Box::into_raw(data);
// SAFETY: `*mut PageDataEntry<T>` and `*mut [PageDataEntry<T>; N]` have the same layout.
unsafe { Box::from_raw(data.cast::<PageDataEntry<T>>().cast::<PageData<T>>()) }
};
Self {
slot_vtable: SlotVTable::of::<T>(),
@ -390,7 +399,7 @@ impl Page {
impl Drop for Page {
fn drop(&mut self) {
let len = self.allocated.read_mut();
let len = *self.allocated.get_mut();
// SAFETY: We supply the data pointer and the initialized length
unsafe { (self.slot_vtable.drop_impl)(self.data.as_ptr(), len, &self.memo_types) };
}

View file

@ -7,8 +7,8 @@ use std::{
use thin_vec::ThinVec;
use crate::loom::sync::atomic::{AtomicPtr, Ordering};
use crate::loom::sync::{AtomicMut, OnceLock, RwLock};
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::{OnceLock, RwLock};
use crate::{zalsa::MemoIngredientIndex, zalsa_local::QueryOrigin};
/// The "memo table" stores the memoized results of tracked function calls.
@ -222,9 +222,10 @@ impl MemoTableWithTypes<'_> {
}
}
let memo_entry = &mut memos[memo_ingredient_index].atomic_memo;
let old_entry = memo_entry.read_mut();
memo_entry.write_mut(MemoEntryType::to_dummy(memo).as_ptr());
let old_entry = mem::replace(
memos[memo_ingredient_index].atomic_memo.get_mut(),
MemoEntryType::to_dummy(memo).as_ptr(),
);
// SAFETY: The `TypeId` is asserted in `insert()`.
NonNull::new(old_entry).map(|memo| unsafe { MemoEntryType::from_dummy(memo) })
@ -288,7 +289,7 @@ impl MemoTableWithTypesMut<'_> {
else {
return;
};
let Some(memo) = NonNull::new(atomic_memo.read_mut()) else {
let Some(memo) = NonNull::new(*atomic_memo.get_mut()) else {
return;
};
@ -341,11 +342,11 @@ impl MemoEntry {
/// The type must match.
#[inline]
unsafe fn take(&mut self, type_: &MemoEntryType) -> Option<Box<dyn Memo>> {
let memo = NonNull::new(self.atomic_memo.read_mut());
self.atomic_memo.write_mut(ptr::null_mut());
let memo = mem::replace(self.atomic_memo.get_mut(), ptr::null_mut());
let memo = NonNull::new(memo)?;
let type_ = type_.load()?;
// SAFETY: Our preconditions.
Some(unsafe { Box::from_raw((type_.to_dyn_fn)(memo?).as_ptr()) })
Some(unsafe { Box::from_raw((type_.to_dyn_fn)(memo).as_ptr()) })
}
}

View file

@ -3,7 +3,6 @@
use std::any::TypeId;
use std::hash::Hash;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Index;
use std::{fmt, mem};
@ -15,12 +14,11 @@ use crate::function::VerifyResult;
use crate::id::{AsId, FromId};
use crate::ingredient::{Ingredient, Jar};
use crate::key::DatabaseKeyIndex;
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::Arc;
use crate::plumbing::ZalsaLocal;
use crate::revision::OptionalAtomicRevision;
use crate::runtime::StampedValue;
use crate::salsa_struct::SalsaStructInDb;
use crate::sync::Arc;
use crate::table::memo::{MemoTable, MemoTableTypes, MemoTableWithTypesMut};
use crate::table::{Slot, Table};
use crate::zalsa::{IngredientIndex, Zalsa};
@ -452,20 +450,19 @@ where
continue;
};
return Self::data_raw(zalsa.table(), id).with_mut(|data_raw| {
let data = unsafe { (*data_raw).assume_init_mut() };
// SAFETY: We just removed `id` from the free-list, so we have exclusive access.
let data = unsafe { &mut *Self::data_raw(zalsa.table(), id) };
assert!(
data.updated_at.load().is_none(),
"free list entry for `{id:?}` does not have `None` for `updated_at`"
);
assert!(
data.updated_at.load().is_none(),
"free list entry for `{id:?}` does not have `None` for `updated_at`"
);
// Overwrite the free-list entry. Use `*foo = ` because the entry
// has been previously initialized and we want to free the old contents.
*data = value(id);
// Overwrite the free-list entry. Use `*foo = ` because the entry
// has been previously initialized and we want to free the old contents.
*data = value(id);
id
});
return id;
}
zalsa_local.allocate::<Value<C>>(zalsa, self.ingredient_index, value)
@ -526,9 +523,9 @@ where
// during the current revision and thus obtained an `&` reference to those fields
// that is still live.
let lock_result = data_raw.with(|data_raw| {
{
// SAFETY: Guaranteed by caller.
let data = unsafe { (*data_raw).assume_init_ref() };
let data = unsafe { &*data_raw };
let last_updated_at = data.updated_at.load();
assert!(
@ -536,8 +533,9 @@ where
"two concurrent writers to {id:?}, should not be possible"
);
// The value is already read-locked, but we can reuse it safely as per above.
if last_updated_at == Some(current_revision) {
return Ok(false);
return Ok(id);
}
// Updating the fields may make it necessary to increment the generation of the ID. In
@ -548,80 +546,68 @@ where
"leaking tracked struct {:?} due to generation oveflow",
self.database_key_index(id)
);
return Err(());
return Err(fields);
}
// Acquire the write-lock. This can only fail if there is a parallel thread
// reading from this same `id`, which can only happen if the user has leaked it.
// Tsk tsk.
let swapped_out = data.updated_at.swap(None) ;
let swapped_out = data.updated_at.swap(None);
if swapped_out != last_updated_at {
panic!("failed to acquire write lock, id `{id:?}` must have been leaked across threads");
panic!(
"failed to acquire write lock, id `{id:?}` must have been leaked across threads"
);
}
Ok(true)
});
match lock_result {
// We cannot perform the update as the ID is at its maximum generation.
Err(()) => return Err(fields),
// The value is already read-locked, but we can reuse it safely as per above.
Ok(false) => return Ok(id),
// Acquired the write-lock.
Ok(true) => {}
}
data_raw.with_mut(|data| {
// UNSAFE: Marking as mut requires exclusive access for the duration of
// the `mut`. We have now *claimed* this data by swapping in `None`,
// any attempt to read concurrently will panic.
let data = unsafe { (*data).assume_init_mut() };
// UNSAFE: Marking as mut requires exclusive access for the duration of
// the `mut`. We have now *claimed* this data by swapping in `None`,
// any attempt to read concurrently will panic.
let data = unsafe { &mut *data_raw };
// SAFETY: We assert that the pointer to `data.revisions`
// is a pointer into the database referencing a value
// from a previous revision. As such, it continues to meet
// its validity invariant and any owned content also continues
// to meet its safety invariant.
let untracked_update = unsafe {
C::update_fields(
current_deps.changed_at,
&mut data.revisions,
mem::transmute::<*mut C::Fields<'static>, *mut C::Fields<'db>>(
std::ptr::addr_of_mut!(data.fields),
),
fields,
)
};
// SAFETY: We assert that the pointer to `data.revisions`
// is a pointer into the database referencing a value
// from a previous revision. As such, it continues to meet
// its validity invariant and any owned content also continues
// to meet its safety invariant.
let untracked_update = unsafe {
C::update_fields(
current_deps.changed_at,
&mut data.revisions,
mem::transmute::<*mut C::Fields<'static>, *mut C::Fields<'db>>(
std::ptr::addr_of_mut!(data.fields),
),
fields,
)
};
if untracked_update {
// Consider this a new tracked-struct when any non-tracked field got updated.
// This should be rare and only ever happen if there's a hash collision.
//
// Note that we hold the lock and have exclusive access to the tracked struct data,
// so there should be no live instances of IDs from the previous generation. We clear
// the memos and return a new ID here as if we have allocated a new slot.
let mut table = data.take_memo_table();
if untracked_update {
// Consider this a new tracked-struct when any non-tracked field got updated.
// This should be rare and only ever happen if there's a hash collision.
//
// Note that we hold the lock and have exclusive access to the tracked struct data,
// so there should be no live instances of IDs from the previous generation. We clear
// the memos and return a new ID here as if we have allocated a new slot.
let mut table = data.take_memo_table();
// SAFETY: The memo table belongs to a value that we allocated, so it has the
// correct type.
unsafe { self.clear_memos(zalsa, &mut table, id) };
// SAFETY: The memo table belongs to a value that we allocated, so it has the
// correct type.
unsafe { self.clear_memos(zalsa, &mut table, id) };
id = id
.next_generation()
.expect("already verified that generation is not maximum");
}
id = id
.next_generation()
.expect("already verified that generation is not maximum");
}
if current_deps.durability < data.durability {
data.revisions = C::new_revisions(current_deps.changed_at);
}
data.durability = current_deps.durability;
let swapped_out = data.updated_at.swap(Some(current_revision));
assert!(swapped_out.is_none());
if current_deps.durability < data.durability {
data.revisions = C::new_revisions(current_deps.changed_at);
}
data.durability = current_deps.durability;
let swapped_out = data.updated_at.swap(Some(current_revision));
assert!(swapped_out.is_none());
Ok(id)
})
Ok(id)
}
/// Fetch the data for a given id created by this ingredient from the table,
@ -630,7 +616,7 @@ where
table.get(id)
}
fn data_raw(table: &Table, id: Id) -> &UnsafeCell<MaybeUninit<Value<C>>> {
fn data_raw(table: &Table, id: Id) -> *mut Value<C> {
table.get_raw(id)
}
@ -654,8 +640,8 @@ where
let current_revision = zalsa.current_revision();
let data_raw = Self::data_raw(zalsa.table(), id);
data_raw.with(|data_raw| {
let data = unsafe { (*data_raw).assume_init_ref() };
{
let data = unsafe { &*data_raw };
// We want to set `updated_at` to `None`, signalling that other field values
// cannot be read. The current value should be `Some(R0)` for some older revision.
@ -672,12 +658,11 @@ where
}
}
}
});
}
let mut memo_table = data_raw.with_mut(|data| {
// SAFETY: We have acquired the write lock
unsafe { (*data).assume_init_mut() }.take_memo_table()
});
// SAFETY: We have acquired the write lock
let data = unsafe { &mut *data_raw };
let mut memo_table = data.take_memo_table();
// SAFETY: The memo table belongs to a value that we allocated, so it
// has the correct type.

View file

@ -3,7 +3,7 @@ use std::marker::PhantomData;
use crate::cycle::CycleHeads;
use crate::function::VerifyResult;
use crate::ingredient::Ingredient;
use crate::loom::sync::Arc;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::tracked_struct::{Configuration, Value};
use crate::zalsa::IngredientIndex;

View file

@ -8,7 +8,7 @@ use std::path::PathBuf;
#[cfg(feature = "rayon")]
use rayon::iter::Either;
use crate::loom::sync::Arc;
use crate::sync::Arc;
use crate::Revision;
/// This is used by the macro generated code.

View file

@ -8,10 +8,10 @@ use std::panic::RefUnwindSafe;
use rustc_hash::FxHashMap;
use crate::ingredient::{Ingredient, Jar};
use crate::loom::sync::atomic::{AtomicU64, Ordering};
use crate::loom::sync::{Mutex, RwLock};
use crate::nonce::{Nonce, NonceGenerator};
use crate::runtime::Runtime;
use crate::sync::atomic::{AtomicU64, Ordering};
use crate::sync::{Mutex, RwLock};
use crate::table::memo::MemoTableWithTypes;
use crate::table::Table;
use crate::views::Views;
@ -67,9 +67,7 @@ pub fn views<Db: ?Sized + Database>(db: &Db) -> &Views {
pub struct StorageNonce;
// Generator for storage nonces.
crate::loom::maybe_lazy_static! {
static NONCE: NonceGenerator<StorageNonce> = NonceGenerator::new();
}
static NONCE: NonceGenerator<StorageNonce> = NonceGenerator::new();
/// An ingredient index identifies a particular [`Ingredient`] in the database.
///
@ -426,7 +424,6 @@ where
const UNINITIALIZED: u64 = 0;
/// Create a new cache
#[cfg(not(loom))]
pub const fn new() -> Self {
Self {
cached_data: AtomicU64::new(Self::UNINITIALIZED),
@ -434,14 +431,6 @@ where
}
}
#[cfg(loom)]
pub fn new() -> Self {
Self {
cached_data: AtomicU64::new(Self::UNINITIALIZED),
phantom: PhantomData,
}
}
/// Get a reference to the ingredient in the database.
/// If the ingredient is not already in the cache, it will be created.
#[inline(always)]

View file

@ -9,8 +9,8 @@ use crate::active_query::QueryStack;
use crate::cycle::CycleHeads;
use crate::durability::Durability;
use crate::key::DatabaseKeyIndex;
use crate::loom::sync::atomic::AtomicBool;
use crate::runtime::Stamp;
use crate::sync::atomic::AtomicBool;
use crate::table::{PageIndex, Slot, Table};
use crate::tracked_struct::{Disambiguator, Identity, IdentityHash, IdentityMap};
use crate::zalsa::{IngredientIndex, Zalsa};

View file

@ -1,6 +1,6 @@
//! Utility for tests that lets us log when notable events happen.
#![allow(dead_code)]
#![allow(dead_code, unused_imports)]
use std::sync::{Arc, Mutex};

View file

@ -82,10 +82,8 @@ const _: () = {
where
Db: ?Sized + zalsa_::Database,
{
zalsa_::__maybe_lazy_static! {
static CACHE: zalsa_::IngredientCache<zalsa_struct_::IngredientImpl<Configuration_>> =
zalsa_::IngredientCache::new();
}
static CACHE: zalsa_::IngredientCache<zalsa_struct_::IngredientImpl<Configuration_>> =
zalsa_::IngredientCache::new();
let zalsa = db.zalsa();
CACHE.get_or_create(zalsa, || {

View file

@ -6,6 +6,7 @@ use std::sync::Arc;
mod common;
use common::LogDatabase;
use salsa::Database as _;
use test_log::test;

View file

@ -12,9 +12,10 @@
//! +--------------------+
//! ```
use salsa::CycleRecoveryAction;
use crate::sync::thread;
use crate::{Knobs, KnobsDatabase};
use crate::setup::{Knobs, KnobsDatabase};
use salsa::CycleRecoveryAction;
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)]
struct CycleValue(u32);
@ -60,12 +61,12 @@ fn initial(_db: &dyn KnobsDatabase) -> CycleValue {
#[test_log::test]
fn the_test() {
std::thread::scope(|scope| {
crate::sync::check(|| {
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();
let t1 = scope.spawn(move || query_a(&db_t1));
let t2 = scope.spawn(move || query_b(&db_t2));
let t1 = thread::spawn(move || query_a(&db_t1));
let t2 = thread::spawn(move || query_b(&db_t2));
let (r_t1, r_t2) = (t1.join().unwrap(), t2.join().unwrap());

View file

@ -11,7 +11,8 @@
//! | |
//! +--------------------+
//! ```
use crate::setup::{Knobs, KnobsDatabase};
use crate::sync::thread;
use crate::{Knobs, KnobsDatabase};
const FALLBACK_A: u32 = 0b01;
const FALLBACK_B: u32 = 0b10;
@ -50,18 +51,17 @@ fn cycle_result_b(_db: &dyn KnobsDatabase) -> u32 {
}
#[test_log::test]
#[cfg(not(feature = "shuttle"))] // This test is currently failing.
fn the_test() {
std::thread::scope(|scope| {
crate::sync::check(|| {
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();
let t1 = scope.spawn(move || query_a(&db_t1));
let t2 = scope.spawn(move || query_b(&db_t2));
let t1 = thread::spawn(move || query_a(&db_t1));
let t2 = thread::spawn(move || query_b(&db_t2));
let (r_t1, r_t2) = (t1.join(), t2.join());
assert_eq!((r_t1?, r_t2?), (FALLBACK_A, FALLBACK_B));
Ok(())
})
.unwrap_or_else(|e| std::panic::resume_unwind(e));
assert_eq!((r_t1.unwrap(), r_t2.unwrap()), (FALLBACK_A, FALLBACK_B));
});
}

View file

@ -6,11 +6,11 @@
//!
//! The trick is that the call from Thread T2 comes before B has reached a fixed point.
//! We want to be sure that C sees the final value (and blocks until it is complete).
use crate::sync::thread;
use crate::{Knobs, KnobsDatabase};
use salsa::CycleRecoveryAction;
use crate::setup::{Knobs, KnobsDatabase};
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)]
struct CycleValue(u32);
@ -62,14 +62,14 @@ fn query_c(db: &dyn KnobsDatabase) -> CycleValue {
#[test]
fn the_test() {
std::thread::scope(|scope| {
crate::sync::check(|| {
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();
db_t2.signal_on_will_block(2);
let t1 = scope.spawn(move || query_a(&db_t1));
let t2 = scope.spawn(move || query_c(&db_t2));
let t1 = thread::spawn(move || query_a(&db_t1));
let t2 = thread::spawn(move || query_c(&db_t2));
let (r_t1, r_t2) = (t1.join().unwrap(), t2.join().unwrap());

View file

@ -14,11 +14,11 @@
//! +------------------+--------------------+
//!
//! ```
use crate::sync::thread;
use crate::{Knobs, KnobsDatabase};
use salsa::CycleRecoveryAction;
use crate::setup::{Knobs, KnobsDatabase};
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)]
struct CycleValue(u32);
@ -71,14 +71,14 @@ fn initial(_db: &dyn KnobsDatabase) -> CycleValue {
#[test_log::test]
fn the_test() {
std::thread::scope(|scope| {
crate::sync::check(|| {
let db_t1 = Knobs::default();
let db_t2 = db_t1.clone();
let db_t3 = db_t1.clone();
let t1 = scope.spawn(move || query_a(&db_t1));
let t2 = scope.spawn(move || query_b(&db_t2));
let t3 = scope.spawn(move || query_c(&db_t3));
let t1 = thread::spawn(move || query_a(&db_t1));
let t2 = thread::spawn(move || query_b(&db_t2));
let t3 = thread::spawn(move || query_c(&db_t3));
let r_t1 = t1.join().unwrap();
let r_t2 = t2.join().unwrap();

View file

@ -1,3 +1,6 @@
// Shuttle doesn't like panics inside of its runtime.
#![cfg(not(feature = "shuttle"))]
//! Test for panic in cycle recovery function, in cross-thread cycle.
use crate::setup::{Knobs, KnobsDatabase};
@ -27,16 +30,14 @@ fn initial(_db: &dyn KnobsDatabase) -> u32 {
fn execute() {
let db = Knobs::default();
std::thread::scope(|scope| {
let db_t1 = db.clone();
let t1 = scope.spawn(move || query_a(&db_t1));
let db_t1 = db.clone();
let t1 = std::thread::spawn(move || query_a(&db_t1));
let db_t2 = db.clone();
let t2 = scope.spawn(move || query_b(&db_t2));
let db_t2 = db.clone();
let t2 = std::thread::spawn(move || query_b(&db_t2));
// The main thing here is that we don't deadlock.
let (r1, r2) = (t1.join(), t2.join());
assert!(r1.is_err());
assert!(r2.is_err());
});
// The main thing here is that we don't deadlock.
let (r1, r2) = (t1.join(), t2.join());
assert!(r1.is_err());
assert!(r2.is_err());
}

View file

@ -1,4 +1,5 @@
mod setup;
mod signal;
mod cycle_a_t1_b_t2;
mod cycle_a_t1_b_t2_fallback;
@ -8,4 +9,25 @@ mod cycle_panic;
mod parallel_cancellation;
mod parallel_join;
mod parallel_map;
mod signal;
#[cfg(not(feature = "shuttle"))]
pub(crate) mod sync {
pub use std::sync::*;
pub use std::thread;
pub fn check(f: impl Fn() + Send + Sync + 'static) {
f();
}
}
#[cfg(feature = "shuttle")]
pub(crate) mod sync {
pub use shuttle::sync::*;
pub use shuttle::thread;
pub fn check(f: impl Fn() + Send + Sync + 'static) {
shuttle::check_pct(f, 1000, 50);
}
}
pub(crate) use setup::*;

View file

@ -1,5 +1,7 @@
//! Test for thread cancellation.
// Shuttle doesn't like panics inside of its runtime.
#![cfg(not(feature = "shuttle"))]
//! Test for thread cancellation.
use salsa::{Cancelled, Setter};
use crate::setup::{Knobs, KnobsDatabase};

View file

@ -1,4 +1,5 @@
#![cfg(feature = "rayon")]
#![cfg(all(feature = "rayon", not(feature = "shuttle")))]
// test for rayon-like join interactions.
use salsa::{Cancelled, Setter};

View file

@ -1,4 +1,4 @@
#![cfg(feature = "rayon")]
#![cfg(all(feature = "rayon", not(feature = "shuttle")))]
// test for rayon-like parallel map interactions.
use salsa::{Cancelled, Setter};

View file

@ -1,9 +1,10 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
#![allow(dead_code)]
use salsa::{Database, Storage};
use crate::signal::Signal;
use super::signal::Signal;
use super::sync::atomic::{AtomicUsize, Ordering};
use super::sync::Arc;
/// Various "knobs" and utilities used by tests to force
/// a certain behavior.

View file

@ -1,4 +1,6 @@
use parking_lot::{Condvar, Mutex};
#![allow(unused)]
use super::sync::{Condvar, Mutex};
#[derive(Default)]
pub(crate) struct Signal {
@ -8,16 +10,21 @@ pub(crate) struct Signal {
impl Signal {
pub(crate) fn signal(&self, stage: usize) {
// This check avoids acquiring the lock for things that will
// clearly be a no-op. Not *necessary* but helps to ensure we
// are more likely to encounter weird race conditions;
// otherwise calls to `sum` will tend to be unnecessarily
// synchronous.
if stage > 0 {
let mut v = self.value.lock();
if stage > *v {
*v = stage;
self.cond_var.notify_all();
// When running with shuttle we want to explore as many possible
// executions, so we avoid signals entirely.
#[cfg(not(feature = "shuttle"))]
{
// This check avoids acquiring the lock for things that will
// clearly be a no-op. Not *necessary* but helps to ensure we
// are more likely to encounter weird race conditions;
// otherwise calls to `sum` will tend to be unnecessarily
// synchronous.
if stage > 0 {
let mut v = self.value.lock().unwrap();
if stage > *v {
*v = stage;
self.cond_var.notify_all();
}
}
}
}
@ -25,11 +32,14 @@ impl Signal {
/// Waits until the given condition is true; the fn is invoked
/// with the current stage.
pub(crate) fn wait_for(&self, stage: usize) {
// As above, avoid lock if clearly a no-op.
if stage > 0 {
let mut v = self.value.lock();
while *v < stage {
self.cond_var.wait(&mut v);
#[cfg(not(feature = "shuttle"))]
{
// As above, avoid lock if clearly a no-op.
if stage > 0 {
let mut v = self.value.lock().unwrap();
while *v < stage {
v = self.cond_var.wait(v).unwrap();
}
}
}
}