mirror of https://github.com/rust-lang/rust-analyzer.git, synced 2025-09-29 05:15:04 +00:00
Move salsa fork in-tree
parent 1974e7490d
commit 159a03ad7b
69 changed files with 9478 additions and 41 deletions
277 crates/salsa/src/runtime/dependency_graph.rs (Normal file)
@@ -0,0 +1,277 @@
use triomphe::Arc;

use crate::{DatabaseKeyIndex, RuntimeId};
use parking_lot::{Condvar, MutexGuard};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;

use super::{ActiveQuery, WaitResult};

type QueryStack = Vec<ActiveQuery>;

#[derive(Debug, Default)]
pub(super) struct DependencyGraph {
    /// A `(K -> V)` pair in this map indicates that the runtime
    /// `K` is blocked on some query executing in the runtime `V`.
    /// This encodes a graph that must be acyclic (or else deadlock
    /// will result).
    edges: FxHashMap<RuntimeId, Edge>,

    /// Encodes the `RuntimeId` that are blocked waiting for the result
    /// of a given query.
    query_dependents: FxHashMap<DatabaseKeyIndex, SmallVec<[RuntimeId; 4]>>,

    /// When a key K completes which had dependent queries Qs blocked on it,
    /// it stores its `WaitResult` here. As they wake up, each query Q in Qs will
    /// come here to fetch their results.
    wait_results: FxHashMap<RuntimeId, (QueryStack, WaitResult)>,
}

#[derive(Debug)]
struct Edge {
    blocked_on_id: RuntimeId,
    blocked_on_key: DatabaseKeyIndex,
    stack: QueryStack,

    /// Signalled whenever a query with dependents completes.
    /// Allows those dependents to check if they are ready to unblock.
    condvar: Arc<parking_lot::Condvar>,
}

impl DependencyGraph {
    /// True if `from_id` depends on `to_id`.
    ///
    /// (i.e., there is a path from `from_id` to `to_id` in the graph.)
    pub(super) fn depends_on(&mut self, from_id: RuntimeId, to_id: RuntimeId) -> bool {
        let mut p = from_id;
        while let Some(q) = self.edges.get(&p).map(|edge| edge.blocked_on_id) {
            if q == to_id {
                return true;
            }

            p = q;
        }
        p == to_id
    }

    /// Invokes `closure` with a `&mut ActiveQuery` for each query that participates in the cycle.
    /// The cycle runs as follows:
    ///
    /// 1. The runtime `from_id`, which has the stack `from_stack`, would like to invoke `database_key`...
    /// 2. ...but `database_key` is already being executed by `to_id`...
    /// 3. ...and `to_id` is transitively dependent on something which is present on `from_stack`.
    pub(super) fn for_each_cycle_participant(
        &mut self,
        from_id: RuntimeId,
        from_stack: &mut QueryStack,
        database_key: DatabaseKeyIndex,
        to_id: RuntimeId,
        mut closure: impl FnMut(&mut [ActiveQuery]),
    ) {
        debug_assert!(self.depends_on(to_id, from_id));

        // To understand this algorithm, consider this [drawing](https://is.gd/TGLI9v):
        //
        //    database_key = QB2
        //    from_id = A
        //    to_id = B
        //    from_stack = [QA1, QA2, QA3]
        //
        //    self.edges[B] = { C, QC2, [QB1..QB3] }
        //    self.edges[C] = { A, QA2, [QC1..QC3] }
        //
        //         The cyclic
        //         edge we have
        //         failed to add.
        //           :
        //    A      :    B         C
        //           :
        //    QA1    v    QB1       QC1
        // ┌► QA2    ┌──► QB2   ┌─► QC2
        // │  QA3 ───┘    QB3 ──┘   QC3 ───┐
        // │                               │
        // └───────────────────────────────┘
        //
        // Final output: [QB2, QB3, QC2, QC3, QA2, QA3]

        let mut id = to_id;
        let mut key = database_key;
        while id != from_id {
            // Looking at the diagram above, the idea is to
            // take the edge from `to_id` starting at `key`
            // (inclusive) and down to the end. We can then
            // load up the next thread (i.e., we start at B/QB2,
            // and then load up the dependency on C/QC2).
            let edge = self.edges.get_mut(&id).unwrap();
            let prefix = edge
                .stack
                .iter_mut()
                .take_while(|p| p.database_key_index != key)
                .count();
            closure(&mut edge.stack[prefix..]);
            id = edge.blocked_on_id;
            key = edge.blocked_on_key;
        }

        // Finally, we copy in the results from `from_stack`.
        let prefix = from_stack
            .iter_mut()
            .take_while(|p| p.database_key_index != key)
            .count();
        closure(&mut from_stack[prefix..]);
    }

    /// Unblock each blocked runtime (excluding the current one) if some
    /// query executing in that runtime is participating in cycle fallback.
    ///
    /// Returns a pair of booleans (Current, Others) where:
    /// * Current is true if the current runtime has cycle participants
    ///   with fallback;
    /// * Others is true if other runtimes were unblocked.
    pub(super) fn maybe_unblock_runtimes_in_cycle(
        &mut self,
        from_id: RuntimeId,
        from_stack: &QueryStack,
        database_key: DatabaseKeyIndex,
        to_id: RuntimeId,
    ) -> (bool, bool) {
        // See diagram in `for_each_cycle_participant`.
        let mut id = to_id;
        let mut key = database_key;
        let mut others_unblocked = false;
        while id != from_id {
            let edge = self.edges.get(&id).unwrap();
            let prefix = edge
                .stack
                .iter()
                .take_while(|p| p.database_key_index != key)
                .count();
            let next_id = edge.blocked_on_id;
            let next_key = edge.blocked_on_key;

            if let Some(cycle) = edge.stack[prefix..]
                .iter()
                .rev()
                .find_map(|aq| aq.cycle.clone())
            {
                // Remove `id` from the list of runtimes blocked on `next_key`:
                self.query_dependents
                    .get_mut(&next_key)
                    .unwrap()
                    .retain(|r| *r != id);

                // Unblock runtime so that it can resume execution once lock is released:
                self.unblock_runtime(id, WaitResult::Cycle(cycle));

                others_unblocked = true;
            }

            id = next_id;
            key = next_key;
        }

        let prefix = from_stack
            .iter()
            .take_while(|p| p.database_key_index != key)
            .count();
        let this_unblocked = from_stack[prefix..].iter().any(|aq| aq.cycle.is_some());

        (this_unblocked, others_unblocked)
    }

    /// Modifies the graph so that `from_id` is blocked
    /// on `database_key`, which is being computed by
    /// `to_id`.
    ///
    /// For this to be reasonable, the lock on the
    /// results table for `database_key` must be held.
    /// This ensures that computing `database_key` doesn't
    /// complete before `block_on` executes.
    ///
    /// Preconditions:
    /// * No path from `to_id` to `from_id`
    ///   (i.e., `me.depends_on(to_id, from_id)` is false)
    /// * `query_mutex_guard` is a read lock (or stronger) on `database_key`
    pub(super) fn block_on<QueryMutexGuard>(
        mut me: MutexGuard<'_, Self>,
        from_id: RuntimeId,
        database_key: DatabaseKeyIndex,
        to_id: RuntimeId,
        from_stack: QueryStack,
        query_mutex_guard: QueryMutexGuard,
    ) -> (QueryStack, WaitResult) {
        let condvar = me.add_edge(from_id, database_key, to_id, from_stack);

        // Release the mutex that prevents `database_key`
        // from completing, now that the edge has been added.
        drop(query_mutex_guard);

        loop {
            if let Some(stack_and_result) = me.wait_results.remove(&from_id) {
                debug_assert!(!me.edges.contains_key(&from_id));
                return stack_and_result;
            }
            condvar.wait(&mut me);
        }
    }

    /// Helper for `block_on`: performs actual graph modification
    /// to add a dependency edge from `from_id` to `to_id`, which is
    /// computing `database_key`.
    fn add_edge(
        &mut self,
        from_id: RuntimeId,
        database_key: DatabaseKeyIndex,
        to_id: RuntimeId,
        from_stack: QueryStack,
    ) -> Arc<parking_lot::Condvar> {
        assert_ne!(from_id, to_id);
        debug_assert!(!self.edges.contains_key(&from_id));
        debug_assert!(!self.depends_on(to_id, from_id));

        let condvar = Arc::new(Condvar::new());
        self.edges.insert(
            from_id,
            Edge {
                blocked_on_id: to_id,
                blocked_on_key: database_key,
                stack: from_stack,
                condvar: condvar.clone(),
            },
        );
        self.query_dependents
            .entry(database_key)
            .or_default()
            .push(from_id);
        condvar
    }

    /// Invoked when runtime `to_id` completes executing
    /// `database_key`.
    pub(super) fn unblock_runtimes_blocked_on(
        &mut self,
        database_key: DatabaseKeyIndex,
        wait_result: WaitResult,
    ) {
        let dependents = self
            .query_dependents
            .remove(&database_key)
            .unwrap_or_default();

        for from_id in dependents {
            self.unblock_runtime(from_id, wait_result.clone());
        }
    }

    /// Unblock the runtime with the given id with the given wait-result.
    /// This will cause it to resume execution (though it will have to grab
    /// the lock on this data structure first, to recover the wait result).
    fn unblock_runtime(&mut self, id: RuntimeId, wait_result: WaitResult) {
        let edge = self.edges.remove(&id).expect("not blocked");
        self.wait_results.insert(id, (edge.stack, wait_result));

        // Now that we have inserted the `wait_results`,
        // notify the thread.
        edge.condvar.notify_one();
    }
}
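The comment in `for_each_cycle_participant` ends with a worked example (final output `[QB2, QB3, QC2, QC3, QA2, QA3]`). As an editorial aside, here is a minimal, self-contained sketch of that walk over plain strings and a `HashMap`, standing in for salsa's `RuntimeId`/`DatabaseKeyIndex`/`Edge` types; every name in it is invented for illustration and it is not part of the commit.

use std::collections::HashMap;

// Replays the documented walk: thread A (stack [QA1, QA2, QA3]) tried to call
// QB2, which B is computing, and B -> C -> A closes the cycle.
fn cycle_participants(
    edges: &HashMap<&str, (&str, &str, Vec<&str>)>,
    from_id: &str,
    from_stack: &[&str],
    database_key: &str,
    to_id: &str,
) -> Vec<String> {
    let mut out = Vec::new();
    let (mut id, mut key) = (to_id, database_key);
    while id != from_id {
        // (blocked-on thread, blocked-on key, that thread's query stack)
        let (next_id, next_key, stack) = &edges[id];
        // Skip the frames below `key`, keep everything from `key` onward.
        let prefix = stack.iter().take_while(|q| **q != key).count();
        out.extend(stack[prefix..].iter().map(|&q| q.to_string()));
        id = *next_id;
        key = *next_key;
    }
    // Finally, the suffix of the initiating thread's own stack.
    let prefix = from_stack.iter().take_while(|q| **q != key).count();
    out.extend(from_stack[prefix..].iter().map(|&q| q.to_string()));
    out
}

fn main() {
    let mut edges = HashMap::new();
    edges.insert("B", ("C", "QC2", vec!["QB1", "QB2", "QB3"]));
    edges.insert("C", ("A", "QA2", vec!["QC1", "QC2", "QC3"]));
    let out = cycle_participants(&edges, "A", &["QA1", "QA2", "QA3"], "QB2", "B");
    assert_eq!(out, ["QB2", "QB3", "QC2", "QC3", "QA2", "QA3"]);
}

Likewise, `block_on` and `unblock_runtime` form a standard condition-variable handshake: register an edge and a dependent while holding the graph lock, drop the lock that was keeping the query from completing, then sleep until a wait result has been published. Below is a rough sketch of that handshake with `std::sync` primitives; the real code uses `parking_lot` and richer result types, and `Graph`, `ThreadId`, and `KeyId` here are invented names.

use std::collections::HashMap;
use std::sync::{Arc, Condvar, Mutex, MutexGuard};
use std::thread;

type ThreadId = u64;
type KeyId = u64;

#[derive(Default)]
struct Graph {
    // K is blocked on some query running in V; the graph must stay acyclic.
    edges: HashMap<ThreadId, (ThreadId, Arc<Condvar>)>,
    // Threads waiting on a given query key.
    dependents: HashMap<KeyId, Vec<ThreadId>>,
    // Results published for blocked threads to pick up when they wake.
    wait_results: HashMap<ThreadId, String>,
}

impl Graph {
    // Walk the blocked-on chain, as `depends_on` does.
    fn depends_on(&self, from: ThreadId, to: ThreadId) -> bool {
        let mut p = from;
        while let Some(q) = self.edges.get(&p).map(|(id, _)| *id) {
            if q == to {
                return true;
            }
            p = q;
        }
        p == to
    }

    // Like `block_on`: record the edge and the dependency, then sleep on the
    // condvar until a result for `from` has been published.
    fn block_on(
        mut me: MutexGuard<'_, Graph>,
        from: ThreadId,
        key: KeyId,
        to: ThreadId,
    ) -> String {
        let condvar = Arc::new(Condvar::new());
        me.edges.insert(from, (to, condvar.clone()));
        me.dependents.entry(key).or_default().push(from);
        loop {
            if let Some(result) = me.wait_results.remove(&from) {
                return result;
            }
            me = condvar.wait(me).unwrap();
        }
    }

    // Like `unblock_runtimes_blocked_on`: publish the result for every
    // dependent of `key`, remove their edges, and wake them.
    fn complete(&mut self, key: KeyId, result: &str) {
        for waiter in self.dependents.remove(&key).unwrap_or_default() {
            if let Some((_, condvar)) = self.edges.remove(&waiter) {
                self.wait_results.insert(waiter, result.to_owned());
                condvar.notify_one();
            }
        }
    }
}

fn main() {
    let graph = Arc::new(Mutex::new(Graph::default()));

    // Thread 1 wants key 7, which thread 2 is (notionally) computing.
    let g = Arc::clone(&graph);
    let waiter = thread::spawn(move || {
        let guard = g.lock().unwrap();
        // Precondition: adding the edge keeps the graph acyclic.
        assert!(!guard.depends_on(2, 1));
        Graph::block_on(guard, 1, 7, 2)
    });

    // Spin until thread 1 has registered as a dependent of key 7,
    // then "thread 2" publishes the result.
    while !graph.lock().unwrap().dependents.contains_key(&7) {
        thread::yield_now();
    }
    graph.lock().unwrap().complete(7, "value of key 7");

    assert_eq!(waiter.join().unwrap(), "value of key 7");
}

Re-checking `wait_results` on every wakeup, while holding the lock, is what makes spurious wakeups harmless in both the sketch and the real `block_on` loop.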
232 crates/salsa/src/runtime/local_state.rs (Normal file)
@@ -0,0 +1,232 @@
use tracing::debug;

use crate::durability::Durability;
use crate::runtime::ActiveQuery;
use crate::runtime::Revision;
use crate::Cycle;
use crate::DatabaseKeyIndex;
use std::cell::RefCell;
use triomphe::Arc;

/// State that is specific to a single execution thread.
///
/// Internally, this type uses ref-cells.
///
/// **Note also that all mutations to the database handle (and hence
/// to the local-state) must be undone during unwinding.**
pub(super) struct LocalState {
    /// Vector of active queries.
    ///
    /// This is normally `Some`, but it is set to `None`
    /// while the query is blocked waiting for a result.
    ///
    /// Unwinding note: pushes onto this vector must be popped -- even
    /// during unwinding.
    query_stack: RefCell<Option<Vec<ActiveQuery>>>,
}

/// Summarizes "all the inputs that a query used"
#[derive(Debug, Clone)]
pub(crate) struct QueryRevisions {
    /// The most recent revision in which some input changed.
    pub(crate) changed_at: Revision,

    /// Minimum durability of the inputs to this query.
    pub(crate) durability: Durability,

    /// The inputs that went into our query, if we are tracking them.
    pub(crate) inputs: QueryInputs,
}

/// Every input.
#[derive(Debug, Clone)]
pub(crate) enum QueryInputs {
    /// Non-empty set of inputs, fully known
    Tracked { inputs: Arc<[DatabaseKeyIndex]> },

    /// Empty set of inputs, fully known.
    NoInputs,

    /// Unknown quantity of inputs
    Untracked,
}

impl Default for LocalState {
    fn default() -> Self {
        LocalState {
            query_stack: RefCell::new(Some(Vec::new())),
        }
    }
}

impl LocalState {
    #[inline]
    pub(super) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
        let mut query_stack = self.query_stack.borrow_mut();
        let query_stack = query_stack.as_mut().expect("local stack taken");
        query_stack.push(ActiveQuery::new(database_key_index));
        ActiveQueryGuard {
            local_state: self,
            database_key_index,
            push_len: query_stack.len(),
        }
    }

    fn with_query_stack<R>(&self, c: impl FnOnce(&mut Vec<ActiveQuery>) -> R) -> R {
        c(self
            .query_stack
            .borrow_mut()
            .as_mut()
            .expect("query stack taken"))
    }

    pub(super) fn query_in_progress(&self) -> bool {
        self.with_query_stack(|stack| !stack.is_empty())
    }

    pub(super) fn active_query(&self) -> Option<DatabaseKeyIndex> {
        self.with_query_stack(|stack| {
            stack
                .last()
                .map(|active_query| active_query.database_key_index)
        })
    }

    pub(super) fn report_query_read_and_unwind_if_cycle_resulted(
        &self,
        input: DatabaseKeyIndex,
        durability: Durability,
        changed_at: Revision,
    ) {
        debug!(
            "report_query_read_and_unwind_if_cycle_resulted(input={:?}, durability={:?}, changed_at={:?})",
            input, durability, changed_at
        );
        self.with_query_stack(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_read(input, durability, changed_at);

                // We are a cycle participant:
                //
                //     C0 --> ... --> Ci --> Ci+1 -> ... -> Cn --> C0
                //                        ^      ^
                //                        :      |
                //        This edge -----+       |
                //                               |
                //                               |
                //                               N0
                //
                // In this case, the value we have just read from `Ci+1`
                // is actually the cycle fallback value and not especially
                // interesting. We unwind now with `CycleParticipant` to avoid
                // executing the rest of our query function. This unwinding
                // will be caught and our own fallback value will be used.
                //
                // Note that `Ci+1` may have *other* callers who are not
                // participants in the cycle (e.g., N0 in the graph above).
                // They will not have the `cycle` marker set in their
                // stack frames, so they will just read the fallback value
                // from `Ci+1` and continue on their merry way.
                if let Some(cycle) = &top_query.cycle {
                    cycle.clone().throw()
                }
            }
        })
    }

    pub(super) fn report_untracked_read(&self, current_revision: Revision) {
        self.with_query_stack(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_untracked_read(current_revision);
            }
        })
    }

    /// Update the top query on the stack to act as though it read a value
    /// of durability `durability` which changed in `revision`.
    pub(super) fn report_synthetic_read(&self, durability: Durability, revision: Revision) {
        self.with_query_stack(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_synthetic_read(durability, revision);
            }
        })
    }

    /// Takes the query stack and returns it. This is used when
    /// the current thread is blocking. The stack must be restored
    /// with [`Self::restore_query_stack`] when the thread unblocks.
    pub(super) fn take_query_stack(&self) -> Vec<ActiveQuery> {
        assert!(
            self.query_stack.borrow().is_some(),
            "query stack already taken"
        );
        self.query_stack.take().unwrap()
    }

    /// Restores a query stack taken with [`Self::take_query_stack`] once
    /// the thread unblocks.
    pub(super) fn restore_query_stack(&self, stack: Vec<ActiveQuery>) {
        assert!(self.query_stack.borrow().is_none(), "query stack not taken");
        self.query_stack.replace(Some(stack));
    }
}

impl std::panic::RefUnwindSafe for LocalState {}

/// When a query is pushed onto the `active_query` stack, this guard
/// is returned to represent its slot. The guard can be used to pop
/// the query from the stack -- in the case of unwinding, the guard's
/// destructor will also remove the query.
pub(crate) struct ActiveQueryGuard<'me> {
    local_state: &'me LocalState,
    push_len: usize,
    database_key_index: DatabaseKeyIndex,
}

impl ActiveQueryGuard<'_> {
    fn pop_helper(&self) -> ActiveQuery {
        self.local_state.with_query_stack(|stack| {
            // Sanity check: pushes and pops should be balanced.
            assert_eq!(stack.len(), self.push_len);
            debug_assert_eq!(
                stack.last().unwrap().database_key_index,
                self.database_key_index
            );
            stack.pop().unwrap()
        })
    }

    /// Invoked when the query has successfully completed execution.
    pub(super) fn complete(self) -> ActiveQuery {
        let query = self.pop_helper();
        std::mem::forget(self);
        query
    }

    /// Pops an active query from the stack. Returns the [`QueryRevisions`]
    /// which summarizes the other queries that were accessed during this
    /// query's execution.
    #[inline]
    pub(crate) fn pop(self) -> QueryRevisions {
        // Extract accumulated inputs.
        let popped_query = self.complete();

        // If this frame were a cycle participant, it would have unwound.
        assert!(popped_query.cycle.is_none());

        popped_query.revisions()
    }

    /// If the active query is registered as a cycle participant, remove and
    /// return that cycle.
    pub(crate) fn take_cycle(&self) -> Option<Cycle> {
        self.local_state
            .with_query_stack(|stack| stack.last_mut()?.cycle.take())
    }
}

impl Drop for ActiveQueryGuard<'_> {
    fn drop(&mut self) {
        self.pop_helper();
    }
}
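`ActiveQueryGuard` is a push/pop guard: `complete()` pops the frame and then `mem::forget`s the guard so that `Drop` does not pop a second time, while an unwinding query skips `complete()` and lets `Drop` do the pop. Here is a small, self-contained sketch of that pattern; the `Stack`, `Frame`, and `Guard` names are invented for illustration and are not salsa types.

use std::cell::RefCell;
use std::panic::{self, AssertUnwindSafe};

#[derive(Debug, PartialEq)]
struct Frame(u32);

#[derive(Default)]
struct Stack {
    frames: RefCell<Vec<Frame>>,
}

struct Guard<'a> {
    stack: &'a Stack,
    push_len: usize,
}

impl Stack {
    fn push(&self, frame: Frame) -> Guard<'_> {
        let mut frames = self.frames.borrow_mut();
        frames.push(frame);
        Guard { stack: self, push_len: frames.len() }
    }
}

impl Guard<'_> {
    fn pop_helper(&self) -> Frame {
        let mut frames = self.stack.frames.borrow_mut();
        // Pushes and pops must stay balanced.
        assert_eq!(frames.len(), self.push_len);
        frames.pop().unwrap()
    }

    // Normal completion: pop once, then forget the guard so that Drop
    // does not pop a second time.
    fn complete(self) -> Frame {
        let frame = self.pop_helper();
        std::mem::forget(self);
        frame
    }
}

impl Drop for Guard<'_> {
    // Runs when the guard is dropped without `complete()`, e.g. while
    // unwinding, keeping the stack balanced.
    fn drop(&mut self) {
        self.pop_helper();
    }
}

fn main() {
    let stack = Stack::default();

    // Normal path: `complete` pops the frame exactly once.
    let guard = stack.push(Frame(1));
    assert_eq!(guard.complete(), Frame(1));
    assert!(stack.frames.borrow().is_empty());

    // Unwinding path: the Drop impl pops the frame even on panic.
    panic::set_hook(Box::new(|_| {})); // silence the demo panic's default message
    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        let _guard = stack.push(Frame(2));
        panic!("query unwound");
    }));
    assert!(result.is_err());
    assert!(stack.frames.borrow().is_empty());
}

Either path leaves the stack balanced, which is exactly the invariant the unwinding notes in `LocalState` call out.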