Merge pull request #501 from mbrobbel/remove-old-salsa

Remove the old Salsa, rename `salsa-2022` crate to `salsa`
Niko Matsakis 2024-06-19 09:45:11 +00:00 committed by GitHub
commit 38a44eef87
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
269 changed files with 2327 additions and 11485 deletions

View file

@ -3,15 +3,15 @@ name: Test
on:
push:
branches:
- master
- staging
- trying
- master
- staging
- trying
pull_request:
paths:
- '**.rs'
- '**/Cargo.*'
- '.github/workflows/**.yml'
- 'salsa-2022-tests/tests/compile-fail/**.stderr'
- "**.rs"
- "**/Cargo.*"
- ".github/workflows/**.yml"
- "tests/compile-fail/**.stderr"
merge_group:
jobs:
@ -20,52 +20,52 @@ jobs:
strategy:
matrix:
rust:
- stable
- beta
- stable
- beta
experimental:
- false
- false
include:
- rust: nightly
experimental: true
- rust: nightly
experimental: true
continue-on-error: ${{ matrix.experimental }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
components: rustfmt, clippy
default: true
- name: Format
uses: actions-rs/cargo@v1
with:
command: fmt
args: -- --check
- name: Clippy
uses: actions-rs/cargo@v1
with:
command: clippy
args: --all-features --all-targets --all
- name: Test
uses: actions-rs/cargo@v1
with:
command: test
args: --all-features --all-targets --all
- name: Test docs
uses: actions-rs/cargo@v1
with:
command: test
args: --doc
- name: Check (without default features)
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features
- name: Checkout
uses: actions/checkout@v2
- name: Setup Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
components: rustfmt, clippy
default: true
- name: Format
uses: actions-rs/cargo@v1
with:
command: fmt
args: -- --check
- name: Clippy
uses: actions-rs/cargo@v1
with:
command: clippy
args: --all-features --all-targets --workspace
- name: Test
uses: actions-rs/cargo@v1
with:
command: test
args: --all-features --all-targets --workspace
- name: Test docs
uses: actions-rs/cargo@v1
with:
command: test
args: --doc
- name: Check (without default features)
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features
miri:
name: "Miri"
name: Miri
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
@ -75,4 +75,7 @@ jobs:
rustup override set nightly
cargo miri setup
- name: Test with Miri
run: cargo miri test --no-fail-fast -p salsa-2022 -p salsa-2022-tests -p calc -p lazy-input
run: cargo miri test --no-fail-fast
- name: Run examples with Miri
run: |
cargo miri run --example calc

View file

@ -1,41 +1,36 @@
[package]
name = "salsa"
version = "0.17.0-pre.2" # CHECK salsa-macros version
version = "0.18.0"
authors = ["Salsa developers"]
edition = "2018"
edition = "2021"
license = "Apache-2.0 OR MIT"
repository = "https://github.com/salsa-rs/salsa"
description = "A generic framework for on-demand, incrementalized computation (experimental)"
[dependencies]
arc-swap = "1.4.0"
arc-swap = "1.6.0"
crossbeam = "0.8.1"
crossbeam-utils = { version = "0.8", default-features = false }
dashmap = "5.3.4"
hashlink = "0.8.0"
indexmap = "2"
lock_api = "0.4.7"
log = "0.4.5"
parking_lot = "0.12.1"
rustc-hash = "1.0"
rustc-hash = "1.1.0"
salsa-macros = { path = "components/salsa-macros" }
smallvec = "1.0.0"
oorandom = "11"
salsa-macros = { version = "0.17.0-pre.2", path = "components/salsa-macros" }
[dev-dependencies]
diff = "0.1.0"
env_logger = "0.9"
linked-hash-map = "0.5.2"
rand = "0.8"
rand_distr = "0.4.3"
derive-new = "0.5.9"
env_logger = "*"
expect-test = "1.4.0"
eyre = "0.6.8"
notify-debouncer-mini = "0.2.1"
ordered-float = "3.0"
parking_lot = "0.12.1"
rustversion = "1.0"
test-log = "0.2.11"
insta = "1.8.0"
trybuild = "1.0"
[workspace]
members = [
"components/salsa-macros",
"components/salsa-2022",
"components/salsa-2022-macros",
"examples-2022/calc",
"examples-2022/lazy-input",
"salsa-2022-tests",
]
members = ["components/salsa-macros"]

View file

@ -1,2 +0,0 @@
# Redirects from what the browser requests to what we serve
/ /salsa2022

View file

@ -14,33 +14,6 @@ curl -L https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/download/v$
unzip mdbook-linkcheck.v$MDBOOK_LINKCHECK_VERSION.x86_64-unknown-linux-gnu.zip -d ~/.cargo/bin
chmod +x ~/.cargo/bin/mdbook-linkcheck
# ======================================================================
# The following script automates the deployment of both the latest and a
# specified older version of the 'salsa' documentation using mdbook
# Store the current branch or commit
original_branch=$(git rev-parse --abbrev-ref HEAD)
if [ "$original_branch" == "HEAD" ]; then
original_branch=$(git rev-parse HEAD)
fi
mkdir -p versions # Create a root directory for all versions
# Declare an associative array to map commits to custom version directory names
declare -A commit_to_version=( ["$original_branch"]="salsa2022" ["754eea8b5f8a31b1100ba313d59e41260b494225"]="salsa" )
# Loop over the keys (commit hashes or branch names) in the associative array
for commit in "${!commit_to_version[@]}"; do
git checkout $commit
mdbook build
version_dir="versions/${commit_to_version[$commit]}"
mkdir -p $version_dir
mv book/html/* $version_dir
rm -rf book
done
# Return to the original branch or commit
git checkout $original_branch
# Copy _redirects to the root directory
cp _redirects versions
mdbook build
mkdir versions
mv book/html/* versions

View file

@ -19,7 +19,6 @@
- [Durability](./reference/durability.md)
- [Algorithm](./reference/algorithm.md)
- [Common patterns](./common_patterns.md)
- [Selection](./common_patterns/selection.md)
- [On-demand (Lazy) inputs](./common_patterns/on_demand_inputs.md)
- [Tuning](./tuning.md)
- [Cycle handling](./cycles.md)

View file

@ -1,5 +0,0 @@
> ⚠️ **IN-PROGRESS VERSION OF SALSA.** ⚠️
>
> This page describes the unreleased "Salsa 2022" version, which is a major departure from older versions of salsa. The code here works but is only available on github and from the `salsa-2022` crate.
>
> If you are looking for the older version of salsa, simply visit [this link](https://salsa-rs.netlify.app/salsa)

View file

@ -1,7 +1,5 @@
# On-Demand (Lazy) Inputs
{{#include ../caveat.md}}
Salsa inputs work best if you can easily provide all of the inputs upfront.
However, sometimes the set of inputs is not known beforehand.
@ -15,12 +13,12 @@ That is, when a query requests the text of a file for the first time:
This is possible to achieve in salsa by caching the inputs in your database struct and adding a method to the database trait to retrieve them from this cache.
A complete, runnable file-watching example can be found in [the lazy-input example](https://github.com/salsa-rs/salsa/tree/master/examples-2022/lazy-input).
A complete, runnable file-watching example can be found in [the lazy-input example](https://github.com/salsa-rs/salsa/tree/master/examples/lazy-input).
The setup looks roughly like this:
```rust,ignore
{{#include ../../../examples-2022/lazy-input/src/main.rs:db}}
{{#include ../../../examples/lazy-input/main.rs:db}}
```
- We declare a method on the `Db` trait that gives us a `File` input on-demand (it only requires a `&dyn Db` not a `&mut dyn Db`).
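A minimal sketch of that setup, hedged: the `Jar`, `File`, and `input` names below stand in for the elided `examples/lazy-input/main.rs` include and are assumptions, not the exact source.

```rust,ignore
use std::path::PathBuf;

// An input representing one file; created lazily rather than upfront.
#[salsa::input]
struct File {
    path: PathBuf,
    #[return_ref]
    contents: String,
}

trait Db: salsa::DbWithJar<Jar> {
    // Only needs `&self`: on a cache miss, the database reads the file
    // from disk and stores a fresh `File` input in its internal cache;
    // later calls for the same path return the same input.
    fn input(&self, path: PathBuf) -> eyre::Result<File>;
}
```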
@ -33,5 +31,5 @@ Here we implement a simple driving loop that recompiles the code whenever a fil
You can use the logs to check that only the queries that could have changed are re-evaluated.
```rust,ignore
{{#include ../../../examples-2022/lazy-input/src/main.rs:main}}
{{#include ../../../examples/lazy-input/main.rs:main}}
```

View file

@ -1,78 +0,0 @@
# Selection
The "selection" (or "firewall") pattern is when you have a query Qsel that reads from some
other Qbase and extracts some small bit of information from Qbase that it returns.
In particular, Qsel does not combine values from other queries. In some sense,
then, Qsel is redundant -- you could have just extracted the information
from Qbase yourself, and done without the salsa machinery. But
Qsel serves a role in that it limits the amount of re-execution that is required
when Qbase changes.
## Example: the base query
For example, imagine that you have a query `parse` that parses the input text of a request
and returns a `ParsedResult`, which contains a header and a body:
```rust,ignore
{{#include ../../../examples/selection/main.rs:request}}
```
## Example: a selecting query
And now you have a number of derived queries that only look at the header.
For example, one might extract the "content-type" header:
```rust,ignore
{{#include ../../../examples/selection/util1.rs:util1}}
```
## Why prefer a selecting query?
This `content_type` query is an instance of the *selection* pattern. It only
"selects" a small bit of information from the `ParsedResult`. You might not have
made it a query at all, but instead made it a method on `ParsedResult`.
But using a query for `content_type` has an advantage: now if there are downstream
queries that only depend on the `content_type` (or perhaps on other headers extracted
via a similar pattern), those queries will not have to be re-executed when the request
changes *unless* the content-type header changes. Consider the dependency graph:
```text
request_text --> parse --> content_type --> (other queries)
```
When the `request_text` changes, we are always going to have to re-execute `parse`.
If that produces a new parsed result, we are *also* going to re-execute `content_type`.
But if the result of `content_type` has not changed, then we will *not* re-execute
the other queries.
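As a concrete sketch of the two queries (the real code lives in the elided includes; `Request`, `ParsedResult`, and the header accessors are assumptions, written here in tracked-function style):

```rust,ignore
#[salsa::tracked]
fn parse(db: &dyn Db, request: Request) -> ParsedResult {
    // Re-executes whenever the request text it reads has changed.
    todo!("parse request.text(db) into a header list and a body")
}

#[salsa::tracked]
fn content_type(db: &dyn Db, request: Request) -> Option<String> {
    // Selects one small piece of the parse result; queries that call
    // `content_type` re-run only when this return value changes.
    parse(db, request)
        .headers(db)
        .iter()
        .find(|header| header.name == "content-type")
        .map(|header| header.value.clone())
}
```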
## More levels of selection
In fact, in our example we might consider introducing another level of selection.
Instead of having `content_type` directly access the results of `parse`, it might be better
to insert a selecting query that just extracts the header:
```rust,ignore
{{#include ../../../examples/selection/util2.rs:util2}}
```
This will result in a dependency graph like so:
```text
request_text --> parse --> header --> content_type --> (other queries)
```
The advantage of this is that changes that only affect the "body" or
only consume small parts of the request will
not require us to re-execute `content_type` at all. This would be particularly
valuable if there are a lot of dependent headers.
## A note on cloning and efficiency
In this example, we used common Rust types like `Vec` and `String`,
and we cloned them quite frequently. This will work just fine in Salsa,
but it may not be the most efficient choice. This is because each clone
is going to produce a deep copy of the result. As a simple fix, you
might convert your data structures to use `Arc` (e.g., `Arc<Vec<ParsedHeader>>`),
which makes cloning cheap.
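For instance, the parse result might be declared like so (an assumption for illustration, not the example's actual definition):

```rust,ignore
use std::sync::Arc;

// Cloning this struct now copies two reference counts instead of
// deep-copying the header list and the body text.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ParsedResult {
    headers: Arc<Vec<ParsedHeader>>,
    body: Arc<String>,
}
```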

View file

@ -1,7 +1,5 @@
# Salsa overview
{{#include caveat.md}}
This page contains a brief overview of the pieces of a Salsa program.
For a more detailed look, check out the [tutorial](./tutorial.md), which walks through the creation of an entire project end-to-end.
@ -154,7 +152,7 @@ Tracked functions can return any clone-able type. A clone is required since, whe
**Tracked structs** are intermediate structs created during your computation.
Like inputs, their fields are stored inside the database, and the struct itself just wraps an id.
Unlike inputs, they can only be created inside a tracked function, and their fields can never change once they are created (until the next revision, at least).
Getter methods are provided to read the fields, but there are no setter methods.
Getter methods are provided to read the fields, but there are no setter methods.
Example:
```rust
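// The diff truncates the example here; below is a hedged sketch of what
// such a tracked struct might look like (the names are illustrative).
#[salsa::tracked]
struct Ast {
    #[return_ref]
    top_level_items: Vec<Item>,
}
```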

View file

@ -1,7 +1,5 @@
# Plumbing
{{#include caveat.md}}
This chapter documents the code that salsa generates and its "inner workings".
We refer to this as the "plumbing".
@ -9,11 +7,11 @@ We refer to this as the "plumbing".
The plumbing section is broken up into chapters:
* The [jars and ingredients](./plumbing/jars_and_ingredients.md) covers how each salsa item (like a tracked function) specifies what data it needs at runtime, and how links between items work.
* The [database and runtime](./plumbing/database_and_runtime.md) covers the data structures that are used at runtime to coordinate workers, trigger cancellation, track which functions are active and what dependencies they have accrued, and so forth.
* The [query operations](./plumbing/query_ops.md) chapter describes how the major operations on function ingredients work. This text was written for an older version of salsa but the logic is the same:
* The [maybe changed after](./plumbing/maybe_changed_after.md) operation determines when a memoized value for a tracked function is out of date.
* The [fetch](./plumbing/fetch.md) operation computes the most recent value.
* The [derived queries flowchart](./plumbing/derived_flowchart.md) depicts the logic in flowchart form.
* The [cycle handling](./plumbing/cycles.md) chapter describes what happens when cycles occur.
* The [terminology](./plumbing/terminology.md) section describes various words that appear throughout.
- The [jars and ingredients](./plumbing/jars_and_ingredients.md) covers how each salsa item (like a tracked function) specifies what data it needs at runtime, and how links between items work.
- The [database and runtime](./plumbing/database_and_runtime.md) covers the data structures that are used at runtime to coordinate workers, trigger cancellation, track which functions are active and what dependencies they have accrued, and so forth.
- The [query operations](./plumbing/query_ops.md) chapter describes how the major operations on function ingredients work. This text was written for an older version of salsa but the logic is the same:
- The [maybe changed after](./plumbing/maybe_changed_after.md) operation determines when a memoized value for a tracked function is out of date.
- The [fetch](./plumbing/fetch.md) operation computes the most recent value.
- The [derived queries flowchart](./plumbing/derived_flowchart.md) depicts the logic in flowchart form.
- The [cycle handling](./plumbing/cycles.md) chapter describes what happens when cycles occur.
- The [terminology](./plumbing/terminology.md) section describes various words that appear throughout.

View file

@ -13,8 +13,8 @@ struct MyDatabase {
This data is divided into two categories:
* Salsa-governed storage, contained in the `Storage<Self>` field. This data is mandatory.
* Other fields (like `maybe_other_fields`) defined by the user. This can be anything. This allows you to give access to special resources or whatever.
- Salsa-governed storage, contained in the `Storage<Self>` field. This data is mandatory.
- Other fields (like `maybe_other_fields`) defined by the user. This can be anything. This allows you to give access to special resources or whatever.
## Parallel handles
@ -28,10 +28,10 @@ The `Snapshot` method returns a `Snapshot<DB>` type, which prevents these clones
The salsa `Storage` struct contains all the data that salsa itself will use and work with.
There are three key bits of data:
* The `Shared` struct, which contains the data stored across all snapshots. This is primarily the ingredients described in the [jars and ingredients chapter](./jars_and_ingredients.md), but it also contains some synchronization information (a cond var). This is used for cancellation, as described below.
* The data in the `Shared` struct is only shared across threads when other threads are active. Some operations, like mutating an input, require an `&mut` handle to the `Shared` struct. This is obtained by using the `Arc::get_mut` method; obviously this is only possible when all snapshots and threads have ceased executing, since there must be a single handle to the `Arc`.
* The `Routes` struct, which contains the information to find any particular ingredient -- this is also shared across all handles, and its construction is also described in the [jars and ingredients chapter](./jars_and_ingredients.md). The routes are separated out from the `Shared` struct because they are truly immutable at all times, and we want to be able to hold a handle to them while getting `&mut` access to the `Shared` struct.
* The `Runtime` struct, which is specific to a particular database instance. It contains the data for a single active thread, along with some links to shared data of its own.
- The `Shared` struct, which contains the data stored across all snapshots. This is primarily the ingredients described in the [jars and ingredients chapter](./jars_and_ingredients.md), but it also contains some synchronization information (a cond var). This is used for cancellation, as described below.
- The data in the `Shared` struct is only shared across threads when other threads are active. Some operations, like mutating an input, require an `&mut` handle to the `Shared` struct. This is obtained by using the `Arc::get_mut` method; obviously this is only possible when all snapshots and threads have ceased executing, since there must be a single handle to the `Arc`.
- The `Routes` struct, which contains the information to find any particular ingredient -- this is also shared across all handles, and its construction is also described in the [jars and ingredients chapter](./jars_and_ingredients.md). The routes are separated out from the `Shared` struct because they are truly immutable at all times, and we want to be able to hold a handle to them while getting `&mut` access to the `Shared` struct.
- The `Runtime` struct, which is specific to a particular database instance. It contains the data for a single active thread, along with some links to shared data of its own.
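Putting those three pieces together, the shape of `Storage` is roughly the following — a sketch reconstructed from the description above, not the elided source:

```rust,ignore
use std::sync::Arc;

struct Storage<DB: HasJars> {
    // Ingredients plus a condvar for cancellation; shared by all snapshots.
    shared: Arc<Shared<DB>>,
    // Immutable table of routes to each ingredient; shared by all handles,
    // and usable even while `shared` is borrowed `&mut`.
    routes: Arc<Routes<DB>>,
    // Per-handle state: the active query stack, current revision, etc.
    runtime: Runtime,
}
```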
## Incrementing the revision counter and getting mutable access to the jars
@ -43,20 +43,20 @@ Each of the snapshots however only holds another handle on the `Arc` in `Storage` that
Whenever the user attempts to do an `&mut`-operation, such as modifying an input field, that needs to
first cancel any parallel snapshots and wait for those parallel threads to finish.
Once the snapshots have completed, we can use `Arc::get_mut` to get an `&mut` reference to the ingredient data.
This allows us to get `&mut` access without any unsafe code and
This allows us to get `&mut` access without any unsafe code and
guarantees that we have successfully managed to cancel the other worker threads
(or gotten ourselves into a deadlock).
The code to acquire `&mut` access to the database is the `jars_mut` method:
```rust
{{#include ../../../components/salsa-2022/src/storage.rs:jars_mut}}
{{#include ../../../src/storage.rs:jars_mut}}
```
The key initial point is that it invokes `cancel_other_workers` before proceeding:
```rust
{{#include ../../../components/salsa-2022/src/storage.rs:cancel_other_workers}}
{{#include ../../../src/storage.rs:cancel_other_workers}}
```
## The Salsa runtime
@ -68,5 +68,3 @@ It also tracks the current revision and information about when values with low o
Basically, the ingredient structures store the "data at rest" -- like memoized values -- and things that are "per ingredient".
The runtime stores the "active, in-progress" data, such as which queries are on the stack, and/or the dependencies accessed by the currently active query.

View file

@ -1,10 +1,8 @@
# Jars and ingredients
{{#include ../caveat.md}}
This page covers how data is organized in Salsa and how links between Salsa items (e.g., dependency tracking) work.
## Salsa items and ingredients
## Salsa items and ingredients
A **Salsa item** is some item annotated with a Salsa annotation that can be included in a jar.
For example, a tracked function is a Salsa item:
@ -117,7 +115,7 @@ struct MyDatabase {
...the `salsa::db` macro would generate a `HasJars` impl that (among other things) contains `type Jars = (Jar1, ..., JarN)`:
```rust,ignore
{{#include ../../../components/salsa-2022-macros/src/db.rs:HasJars}}
{{#include ../../../components/salsa-macros/src/db.rs:HasJars}}
```
In turn, the `salsa::Storage<DB>` type ultimately contains a struct `Shared` that embeds `DB::Jars`, thus embedding all the data for each jar.
@ -131,7 +129,7 @@ This is a 32-bit number that identifies a particular ingredient from a particula
### Routes
In addition to an index, each ingredient in the database also has a corresponding *route*.
In addition to an index, each ingredient in the database also has a corresponding _route_.
A route is a closure that, given a reference to the `DB::Jars` tuple,
returns a `&dyn Ingredient<DB>` reference.
The route table allows us to go from the `IngredientIndex` for a particular ingredient
@ -145,7 +143,7 @@ A `DatabaseKeyIndex` identifies a specific value stored in some specific ingredi
It combines an [`IngredientIndex`] with a `key_index`, which is a `salsa::Id`:
```rust,ignore
{{#include ../../../components/salsa-2022/src/key.rs:DatabaseKeyIndex}}
{{#include ../../../src/key.rs:DatabaseKeyIndex}}
```
A `DependencyIndex` is similar, but the `key_index` is optional.
@ -153,11 +151,11 @@ This is used when we sometimes wish to refer to the ingredient as a whole, and n
These kinds of indices are used to store connections between ingredients.
For example, each memoized value has to track its inputs.
Those inputs are stored as dependency indices.
Those inputs are stored as dependency indices.
We can then do things like ask, "did this input change since revision R?" by
* using the ingredient index to find the route and get a `&dyn Ingredient<DB>`
* and then invoking the `maybe_changed_since` method on that trait object.
- using the ingredient index to find the route and get a `&dyn Ingredient<DB>`
- and then invoking the `maybe_changed_since` method on that trait object.
### `HasJarsDyn`
@ -166,23 +164,23 @@ The user's code always interacts with a `dyn crate::Db` value, where `crate::Db`
Ideally, we would have `salsa::Database` extend `salsa::HasJars`, which is the main trait that gives access to the jars data.
But we don't want to do that because `HasJars` defines an associated type `Jars`, and that would mean that every reference to `dyn crate::Db` would have to specify the jars type using something like `dyn crate::Db<Jars = J>`.
This would be unergonomic, but what's worse, it would actually be impossible: the final Jars type combines the jars from multiple crates, and so it is not known to any individual jar crate.
To work around this, `salsa::Database` in fact extends *another* trait, `HasJarsDyn`, that doesn't reveal the `Jars` or ingredient types directly, but just has various methods that can be performed on an ingredient, given its `IngredientIndex`.
To work around this, `salsa::Database` in fact extends _another_ trait, `HasJarsDyn`, that doesn't reveal the `Jars` or ingredient types directly, but just has various methods that can be performed on an ingredient, given its `IngredientIndex`.
Traits like `Ingredient<DB>` require knowing the full `DB` type.
If we had one function ingredient directly invoke a method on `Ingredient<DB>`, that would imply that it has to be fully generic and only instantiated at the final crate, when the full database type is available.
We solve this via the `HasJarsDyn` trait. The `HasJarsDyn` trait exports a method that combines the "find ingredient, invoking method" steps into one method:
```rust,ignore
{{#include ../../../components/salsa-2022/src/storage.rs:HasJarsDyn}}
{{#include ../../../src/storage.rs:HasJarsDyn}}
```
So, technically, to check if an input has changed, an ingredient:
* Invokes `HasJarsDyn::maybe_changed_after` on the `dyn Database`
* The impl for this method (generated by `#[salsa::db]`):
* gets the route for the ingredient from the ingredient index
* uses the route to get a `&dyn Ingredient`
* invokes `maybe_changed_after` on that ingredient
- Invokes `HasJarsDyn::maybe_changed_after` on the `dyn Database`
- The impl for this method (generated by `#[salsa::db]`):
- gets the route for the ingredient from the ingredient index
- uses the route to get a `&dyn Ingredient`
- invokes `maybe_changed_after` on that ingredient
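In code, the generated impl might look roughly like this — a sketch of the steps above; the exact signature and the `routes`/`jars` accessor names are assumptions:

```rust,ignore
impl HasJarsDyn for MyDatabase {
    fn maybe_changed_after(&self, input: DependencyIndex, revision: Revision) -> bool {
        // (1) get the route for the ingredient from the ingredient index
        let route = self.routes().route(input.ingredient_index());
        // (2) use the route to get a `&dyn Ingredient`
        let ingredient: &dyn Ingredient<Self> = route(self.jars());
        // (3) invoke `maybe_changed_after` on that ingredient
        ingredient.maybe_changed_after(self, input, revision)
    }
}
```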
### Initializing the database
@ -190,7 +188,7 @@ The last thing to discuss is how the database is initialized.
The `Default` implementation for `Storage<DB>` does the work:
```rust,ignore
{{#include ../../../components/salsa-2022/src/storage.rs:default}}
{{#include ../../../src/storage.rs:default}}
```
First, it creates an empty `Routes` instance.
@ -198,16 +196,16 @@ Then it invokes the `DB::create_jars` method.
The implementation of this method is defined by the `#[salsa::db]` macro; it invokes `salsa::plumbing::create_jars_inplace` to allocate memory for the jars, and then invokes the `Jar::init_jar` method on each of the jars to initialize them:
```rust,ignore
{{#include ../../../components/salsa-2022-macros/src/db.rs:create_jars}}
{{#include ../../../components/salsa-macros/src/db.rs:create_jars}}
```
This implementation for `init_jar` is generated by the `#[salsa::jar]` macro, and simply walks over the representative type for each salsa item and asks *it* to create its ingredients:
This implementation for `init_jar` is generated by the `#[salsa::jar]` macro, and simply walks over the representative type for each salsa item and asks _it_ to create its ingredients:
```rust,ignore
{{#include ../../../components/salsa-2022-macros/src/jar.rs:init_jar}}
{{#include ../../../components/salsa-macros/src/jar.rs:init_jar}}
```
The code to create the ingredients for any particular item is generated by their associated macros (e.g., `#[salsa::tracked]`, `#[salsa::input]`), but it always follows a particular structure.
To create an ingredient, we first invoke `Routes::push`, which creates the routes to that ingredient and assigns it an `IngredientIndex`.
We can then invoke a function such as `FunctionIngredient::new` to create the structure.
The *routes* to an ingredient are defined as closures that, given the `DB::Jars`, can find the data for a particular ingredient.
The _routes_ to an ingredient are defined as closures that, given the `DB::Jars`, can find the data for a particular ingredient.

View file

@ -20,11 +20,11 @@ contains both the field values but also the revisions when they last changed val
## Each tracked struct has a globally unique id
This will begin by creating a *globally unique, 32-bit id* for the tracked struct. It is created by interning a combination of
This will begin by creating a _globally unique, 32-bit id_ for the tracked struct. It is created by interning a combination of
* the currently executing query;
* a u64 hash of the `#[id]` fields;
* a *disambiguator* that makes this hash unique within the current query. i.e., when a query starts executing, it creates an empty map, and the first time a tracked struct with a given hash is created, it gets disambiguator 0. The next one will be given 1, etc.
- the currently executing query;
- a u64 hash of the `#[id]` fields;
- a _disambiguator_ that makes this hash unique within the current query. i.e., when a query starts executing, it creates an empty map, and the first time a tracked struct with a given hash is created, it gets disambiguator 0. The next one will be given 1, etc.
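Conceptually, the interning key combines those three pieces; here is a hedged sketch (the actual salsa type is elided from this diff, and the names are illustrative):

```rust,ignore
// Interning the same key twice yields the same 32-bit id.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct TrackedStructKey {
    // the query that was executing when the struct was created
    active_query: DatabaseKeyIndex,
    // u64 hash of the #[id] fields
    data_hash: u64,
    // 0 for the first struct with this hash created during this query
    // execution, 1 for the second, and so on
    disambiguator: u32,
}
```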
## Each tracked struct has a `ValueStruct` storing its data
@ -32,10 +32,10 @@ The struct and field ingredients share access to a hashmap that maps
each field id to a value struct:
```rust,ignore
{{#include ../../../components/salsa-2022/src/tracked_struct.rs:ValueStruct}}
{{#include ../../../src/tracked_struct.rs:ValueStruct}}
```
The value struct stores the values of the fields but also the revisions when
The value struct stores the values of the fields but also the revisions when
that field last changed. Each time the struct is recreated in a new revision,
the old and new values for its fields are compared and a new revision is created.
@ -46,5 +46,5 @@ but also various important operations such as extracting the hashable id fields
and updating the "revisions" to track when a field last changed:
```rust,ignore
{{#include ../../../components/salsa-2022/src/tracked_struct.rs:Configuration}}
{{#include ../../../src/tracked_struct.rs:Configuration}}
```

View file

@ -25,14 +25,9 @@ Interning is especially useful for queries that involve nested,
tree-like data structures.
See:
- The [`compiler` example](https://github.com/salsa-rs/salsa/blob/master/examples/compiler/main.rs),
which uses interning.
## Granularity of Incrementality
See:
- [common patterns: selection](./common_patterns/selection.md) and
- The [`selection` example](https://github.com/salsa-rs/salsa/blob/master/examples/selection/main.rs)
which demonstrates the selection pattern.
## Cancellation
@ -45,4 +40,3 @@ salsa won't be able to cancel it automatically. You may wish to check for cancel
by invoking `db.unwind_if_cancelled()`.
For more details on cancellation, see the tests for cancellation behavior in the Salsa repo.

View file

@ -1,7 +1,5 @@
# Tutorial: calc
{{#include caveat.md}}
This tutorial walks through an end-to-end example of using Salsa.
It does not assume you know anything about salsa,
but reading the [overview](./overview.md) first is probably a good idea to get familiar with the basic concepts.

View file

@ -10,7 +10,7 @@ Salsa defines a mechanism for managing this called an **accumulator**.
In our case, we define an accumulator struct called `Diagnostics` in the `ir` module:
```rust
{{#include ../../../examples-2022/calc/src/ir.rs:diagnostic}}
{{#include ../../../examples/calc/ir.rs:diagnostic}}
```
Accumulator structs are always newtype structs with a single field, in this case of type `Diagnostic`.
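For reference, the elided include boils down to a declaration like the following sketch; the exact field layout of `Diagnostic` is an assumption:

```rust
#[salsa::accumulator]
pub struct Diagnostics(Diagnostic);

// An ordinary value type; the accumulator wraps exactly one such field.
#[derive(Clone, Debug)]
pub struct Diagnostic {
    pub start: usize,
    pub end: usize,
    pub message: String,
}
```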
@ -22,7 +22,7 @@ or any functions that they called
The `Parser::report_error` method contains an example of pushing a diagnostic:
```rust
{{#include ../../../examples-2022/calc/src/parser.rs:report_error}}
{{#include ../../../examples/calc/parser.rs:report_error}}
```
To get the set of diagnostics produced by `parse_errors`, or any other memoized function,

View file

@ -7,10 +7,10 @@ the one which starts up the program, supplies the inputs, and relays the outputs
In `calc`, the database struct is in the [`db`] module, and it looks like this:
[`db`]: https://github.com/salsa-rs/salsa/blob/master/examples-2022/calc/src/db.rs
[`db`]: https://github.com/salsa-rs/salsa/blob/master/examples/calc/db.rs
```rust
{{#include ../../../examples-2022/calc/src/db.rs:db_struct}}
{{#include ../../../examples/calc/db.rs:db_struct}}
```
The `#[salsa::db(...)]` attribute takes a list of all the jars to include.
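A sketch of such a struct, assuming a single `crate::Jar` (the elided include shows the real one):

```rust
#[salsa::db(crate::Jar)]
#[derive(Default)]
pub struct Database {
    // Salsa-managed storage for every jar listed in the attribute above.
    storage: salsa::Storage<Self>,
}
```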
@ -24,7 +24,7 @@ The `salsa::db` attribute autogenerates a bunch of impls for things like the `sa
In addition to the struct itself, we must add an impl of `salsa::Database`:
```rust
{{#include ../../../examples-2022/calc/src/db.rs:db_impl}}
{{#include ../../../examples/calc/db.rs:db_impl}}
```
## Implementing the `salsa::ParallelDatabase` trait
@ -32,7 +32,7 @@ In addition to the struct itself, we must add an impl of `salsa::Database`:
If you want to permit accessing your database from multiple threads at once, then you also need to implement the `ParallelDatabase` trait:
```rust
{{#include ../../../examples-2022/calc/src/db.rs:par_db_impl}}
{{#include ../../../examples/calc/db.rs:par_db_impl}}
```
## Implementing the traits for each jar

View file

@ -25,7 +25,7 @@ The `DebugWithDb` trait is automatically derived for all `#[input]`, `#[interned
For consistency, it is sometimes useful to have a `DebugWithDb` implementation even for types, like `Op`, that are just ordinary enums. You can do that like so:
```rust
{{#include ../../../examples-2022/calc/src/ir.rs:op_debug_impl}}
{{#include ../../../examples/calc/ir.rs:op_debug_impl}}
```
## Writing the unit test
@ -34,11 +34,11 @@ Now that we have our `DebugWithDb` impls in place, we can write a simple unit te
The `parse_string` function below creates a database, sets the source text, and then invokes the parser:
```rust
{{#include ../../../examples-2022/calc/src/parser.rs:parse_string}}
{{#include ../../../examples/calc/parser.rs:parse_string}}
```
Combined with the [`expect-test`](https://crates.io/crates/expect-test) crate, we can then write unit tests like this one:
```rust
{{#include ../../../examples-2022/calc/src/parser.rs:parse_print}}
{{#include ../../../examples/calc/parser.rs:parse_print}}
```

View file

@ -9,9 +9,9 @@ now we are going to define them for real.
In addition to regular Rust types, we will make use of various **Salsa structs**.
A Salsa struct is a struct that has been annotated with one of the Salsa annotations:
* [`#[salsa::input]`](#input-structs), which designates the "base inputs" to your computation;
* [`#[salsa::tracked]`](#tracked-structs), which designates intermediate values created during your computation;
* [`#[salsa::interned]`](#interned-structs), which designates small values that are easy to compare for equality.
- [`#[salsa::input]`](#input-structs), which designates the "base inputs" to your computation;
- [`#[salsa::tracked]`](#tracked-structs), which designates intermediate values created during your computation;
- [`#[salsa::interned]`](#interned-structs), which designates small values that are easy to compare for equality.
All Salsa structs store the actual values of their fields in the Salsa database.
This permits us to track when the values of those fields change to figure out what work will need to be re-executed.
@ -23,7 +23,7 @@ You must also list the struct in the jar definition itself, or you will get erro
## Input structs
The first thing we will define is our **input**.
The first thing we will define is our **input**.
Every Salsa program has some basic inputs that drive the rest of the computation.
The rest of the program must be some deterministic function of those base inputs,
such that when those inputs change, we can try to efficiently recompute the new result of that function.
@ -31,7 +31,7 @@ such that when those inputs change, we can try to efficiently recompute the new
Inputs are defined as Rust structs with a `#[salsa::input]` annotation:
```rust
{{#include ../../../examples-2022/calc/src/ir.rs:input}}
{{#include ../../../examples/calc/ir.rs:input}}
```
In our compiler, we have just one simple input, the `SourceProgram`, which has a `text` field (the string).
@ -58,12 +58,12 @@ For an input, a `&db` reference is required, along with the values for each fiel
let source = SourceProgram::new(&db, "print 11 + 11".to_string());
```
You can read the value of the field with `source.text(&db)`,
You can read the value of the field with `source.text(&db)`,
and you can set the value of the field with `source.set_text(&mut db, "print 11 * 2".to_string())`.
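Putting the getter and setter together, a usage sketch based on the signatures just described:

```rust
let mut db = Database::default();
let source = SourceProgram::new(&db, "print 11 + 11".to_string());
let text = source.text(&db);                          // read: a shared `&db` suffices
source.set_text(&mut db, "print 11 * 2".to_string()); // write: needs `&mut db`
```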
### Database revisions
Whenever a function takes an `&mut` reference to the database,
Whenever a function takes an `&mut` reference to the database,
that means that it can only be invoked from outside the incrementalized part of your program,
as explained in [the overview](../overview.md#goal-of-salsa).
When you change the value of an input field, that increments a 'revision counter' in the database,
@ -73,12 +73,12 @@ When we talk about a "revision" of the database, we are referring to the state o
### Representing the parsed program
Next we will define a **tracked struct**.
Whereas inputs represent the *start* of a computation, tracked structs represent intermediate values created during your computation.
Whereas inputs represent the _start_ of a computation, tracked structs represent intermediate values created during your computation.
In this case, the parser is going to take in the `SourceProgram` struct that we saw and return a `Program` that represents the fully parsed program:
```rust
{{#include ../../../examples-2022/calc/src/ir.rs:program}}
{{#include ../../../examples/calc/ir.rs:program}}
```
Like with an input, the fields of a tracked struct are also stored in the database.
@ -90,9 +90,9 @@ then subsequent parts of the computation won't need to re-execute.
Apart from the fields being immutable, the API for working with a tracked struct is quite similar to an input:
* You can create a new value by using `new`: e.g., `Program::new(&db, some_statements)`
* You use a getter to read the value of a field, just like with an input (e.g., `my_func.statements(db)` to read the `statements` field).
* In this case, the field is tagged as `#[return_ref]`, which means that the getter will return a `&Vec<Statement>`, instead of cloning the vector.
- You can create a new value by using `new`: e.g., `Program::new(&db, some_statements)`
- You use a getter to read the value of a field, just like with an input (e.g., `my_func.statements(db)` to read the `statements` field).
- In this case, the field is tagged as `#[return_ref]`, which means that the getter will return a `&Vec<Statement>`, instead of cloning the vector.
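A sketch of the struct and the API described in the list above (the real definition is in the elided include):

```rust
#[salsa::tracked]
pub struct Program {
    #[return_ref]
    pub statements: Vec<Statement>,
}

// Inside a tracked function:
let program = Program::new(db, some_statements);
let stmts: &Vec<Statement> = program.statements(db); // borrowed, not cloned
```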
### The `'db` lifetime
@ -113,18 +113,18 @@ We will also use a tracked struct to represent each function:
The `Function` struct is going to be created by the parser to represent each of the functions defined by the user:
```rust
{{#include ../../../examples-2022/calc/src/ir.rs:functions}}
{{#include ../../../examples/calc/ir.rs:functions}}
```
If we had created some `Function` instance `f`, for example, we might find that the `f.body` field changes
because the user changed the definition of `f`.
This would mean that we have to re-execute those parts of the code that depended on `f.body`
(but not those parts of the code that depended on the body of *other* functions).
(but not those parts of the code that depended on the body of _other_ functions).
Apart from the fields being immutable, the API for working with a tracked struct is quite similar to an input:
* You can create a new value by using `new`: e.g., `Function::new(&db, some_name, some_args, some_body)`
* You use a getter to read the value of a field, just like with an input (e.g., `my_func.args(db)` to read the `args` field).
- You can create a new value by using `new`: e.g., `Function::new(&db, some_name, some_args, some_body)`
- You use a getter to read the value of a field, just like with an input (e.g., `my_func.args(db)` to read the `args` field).
### id fields
@ -136,7 +136,7 @@ For more details, see the [algorithm](../reference/algorithm.md) page of the ref
## Interned structs
The final kind of Salsa struct is the *interned struct*.
The final kind of Salsa struct is the _interned struct_.
As with input and tracked structs, the data for an interned struct is stored in the database.
Unlike those structs, if you intern the same data twice, you get back the **same integer**.
@ -146,7 +146,7 @@ it's also inefficient to have to compare them for equality via string comparison
Therefore, we define two interned structs, `FunctionId` and `VariableId`, each with a single field that stores the string:
```rust
{{#include ../../../examples-2022/calc/src/ir.rs:interned_ids}}
{{#include ../../../examples/calc/ir.rs:interned_ids}}
```
When you invoke e.g. `FunctionId::new(&db, "my_string".to_string())`, you will get back a `FunctionId` that is just a newtype'd integer.
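So, for example (the field name `text` is an assumption for illustration):

```rust
let id1 = FunctionId::new(&db, "my_string".to_string());
let id2 = FunctionId::new(&db, "my_string".to_string());
assert_eq!(id1, id2);        // same data interned twice => the same integer id
let s = id1.text(&db);       // the field getter reads the data back out
```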
@ -172,7 +172,7 @@ while an interned value is in active use.
We won't use any special "Salsa structs" for expressions and statements:
```rust
{{#include ../../../examples-2022/calc/src/ir.rs:statements_and_expressions}}
{{#include ../../../examples/calc/ir.rs:statements_and_expressions}}
```
Since statements and expressions are not tracked, this implies that we are only attempting to get incremental re-use at the granularity of functions --

View file

@ -19,24 +19,24 @@ This permits the crates to define private functions and other things that are me
To define a jar struct, you create a tuple struct with the `#[salsa::jar]` annotation:
```rust
{{#include ../../../examples-2022/calc/src/main.rs:jar_struct}}
{{#include ../../../examples/calc/main.rs:jar_struct}}
```
Although it's not required, it's highly recommended to put the `jar` struct at the root of your crate, so that it can be referred to as `crate::Jar`.
All of the other Salsa annotations reference a jar struct, and they all default to the path `crate::Jar`.
All of the other Salsa annotations reference a jar struct, and they all default to the path `crate::Jar`.
If you put the jar somewhere else, you will have to override that default.
## Defining the database trait
The `#[salsa::jar]` annotation also includes a `db = Db` field.
The `#[salsa::jar]` annotation also includes a `db = Db` field.
The value of this field (normally `Db`) is the name of a trait that represents the database.
Salsa programs never refer *directly* to the database; instead, they take a `&dyn Db` argument.
Salsa programs never refer _directly_ to the database; instead, they take a `&dyn Db` argument.
This allows for separate compilation, where you have a database that contains the data for two jars, but those jars don't depend on one another.
The database trait for our `calc` crate is very simple:
```rust
{{#include ../../../examples-2022/calc/src/main.rs:jar_db}}
{{#include ../../../examples/calc/main.rs:jar_db}}
```
When you define a database trait like `Db`, the one thing that is required is that it must have a supertrait `salsa::DbWithJar<Jar>`,
@ -57,7 +57,7 @@ a common choice is to write a blanket impl for any type that implements `DbWithJ
and that's what we do here:
```rust
{{#include ../../../examples-2022/calc/src/main.rs:jar_db_impl}}
{{#include ../../../examples/calc/main.rs:jar_db_impl}}
```
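Putting the pieces of this chapter together, a hedged sketch (the member list of `Jar` is illustrative, not the calc crate's actual list):

```rust
#[salsa::jar(db = Db)]
pub struct Jar(crate::ir::SourceProgram, crate::ir::Program, crate::parser::parse_statements);

pub trait Db: salsa::DbWithJar<Jar> {}

// The blanket impl: any database that carries this jar implements `Db`.
impl<DB> Db for DB where DB: ?Sized + salsa::DbWithJar<Jar> {}
```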
## Summary

View file

@ -17,12 +17,12 @@ We're going to focus only on the Salsa-related aspects.
The starting point for the parser is the `parse_statements` function:
```rust
{{#include ../../../examples-2022/calc/src/parser.rs:parse_statements}}
{{#include ../../../examples/calc/parser.rs:parse_statements}}
```
This function is annotated as `#[salsa::tracked]`.
That means that, when it is called, Salsa will track what inputs it reads as well as what value it returns.
The return value is *memoized*,
The return value is _memoized_,
which means that if you call this function again without changing the inputs,
Salsa will just clone the result rather than re-execute it.
@ -38,11 +38,11 @@ In the case of `parse_statements`, it directly reads `ProgramSource::text`, so i
By choosing which functions to mark as `#[tracked]`, you control how much reuse you get.
In our case, we're opting to mark the outermost parsing function as tracked, but not the inner ones.
This means that if the input changes, we will always re-parse the entire input and re-create the resulting statements and so forth.
We'll see later that this *doesn't* mean we will always re-run the type checker and other parts of the compiler.
We'll see later that this _doesn't_ mean we will always re-run the type checker and other parts of the compiler.
This trade-off makes sense because (a) parsing is very cheap, so the overhead of tracking and enabling finer-grained reuse doesn't pay off
and because (b) strings are just a big blob-o-bytes without any structure, so it's rather hard to identify which parts of the IR need to be reparsed.
Some systems do choose to do more granular reparsing, often by doing a "first pass" over the string to give it a bit of structure,
Some systems do choose to do more granular reparsing, often by doing a "first pass" over the string to give it a bit of structure,
e.g. to identify the functions,
but deferring the parsing of the body of each function until later.
Setting up a scheme like this is relatively easy in Salsa and uses the same principles that we will use later to avoid re-executing the type checker.
@ -63,11 +63,10 @@ It's generally better to structure tracked functions as functions of a single Sa
### The `return_ref` annotation
You may have noticed that `parse_statements` is tagged with `#[salsa::tracked(return_ref)]`.
You may have noticed that `parse_statements` is tagged with `#[salsa::tracked(return_ref)]`.
Ordinarily, when you call a tracked function, the result you get back is cloned out of the database.
The `return_ref` attribute means that a reference into the database is returned instead.
So, when called, `parse_statements` will return an `&Vec<Statement>` rather than cloning the `Vec`.
This is useful as a performance optimization.
(You may recall the `return_ref` annotation from the [ir](./ir.md) section of the tutorial,
(You may recall the `return_ref` annotation from the [ir](./ir.md) section of the tutorial,
where it was placed on struct fields, with roughly the same meaning.)
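A sketch of the tracked function's signature as described in this chapter (the real body lives in the elided include; the `Parser` helper is hypothetical):

```rust
#[salsa::tracked(return_ref)]
fn parse_statements(db: &dyn Db, source: SourceProgram) -> Vec<Statement> {
    // Reading `source.text(db)` is recorded as this function's input;
    // the returned Vec is memoized and later handed back by reference.
    let text = source.text(db);
    let mut parser = Parser::new(text); // hypothetical helper
    parser.parse_all()
}
```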

View file

@ -1,15 +0,0 @@
[package]
name = "salsa-2022-macros"
version = "0.1.0"
edition = "2021"
[lib]
proc-macro = true
[dependencies]
heck = "0.4"
proc-macro2 = "1.0"
quote = "1.0"
eyre = "0.6.5"
syn = { version = "2.0.64", features = ["full", "visit-mut"] }
synstructure = "0.13.1"

View file

@ -1,102 +0,0 @@
//! This crate provides salsa's macros and attributes.
#![recursion_limit = "256"]
extern crate proc_macro;
extern crate proc_macro2;
#[macro_use]
extern crate quote;
use proc_macro::TokenStream;
macro_rules! parse_quote {
($($inp:tt)*) => {
{
let tt = quote!{$($inp)*};
syn::parse2(tt.clone()).unwrap_or_else(|err| {
panic!("failed to parse `{}` at {}:{}:{}: {}", tt, file!(), line!(), column!(), err)
})
}
}
}
macro_rules! parse_quote_spanned {
($($inp:tt)*) => {
{
let tt = quote_spanned!{$($inp)*};
syn::parse2(tt.clone()).unwrap_or_else(|err| {
panic!("failed to parse `{}` at {}:{}:{}: {}", tt, file!(), line!(), column!(), err)
})
}
}
}
/// Convert a single Ident to Literal: useful when &'static str is needed.
pub(crate) fn literal(ident: &proc_macro2::Ident) -> proc_macro2::Literal {
proc_macro2::Literal::string(&ident.to_string())
}
mod accumulator;
mod configuration;
mod db;
mod db_lifetime;
mod debug;
mod debug_with_db;
mod input;
mod interned;
mod jar;
mod options;
mod salsa_struct;
mod tracked;
mod tracked_fn;
mod tracked_struct;
mod update;
mod xform;
#[proc_macro_attribute]
pub fn accumulator(args: TokenStream, input: TokenStream) -> TokenStream {
accumulator::accumulator(args, input)
}
#[proc_macro_attribute]
pub fn jar(args: TokenStream, input: TokenStream) -> TokenStream {
jar::jar(args, input)
}
#[proc_macro_attribute]
pub fn db(args: TokenStream, input: TokenStream) -> TokenStream {
db::db(args, input)
}
#[proc_macro_attribute]
pub fn interned(args: TokenStream, input: TokenStream) -> TokenStream {
interned::interned(args, input)
}
#[proc_macro_attribute]
pub fn input(args: TokenStream, input: TokenStream) -> TokenStream {
input::input(args, input)
}
#[proc_macro_attribute]
pub fn tracked(args: TokenStream, input: TokenStream) -> TokenStream {
tracked::tracked(args, input)
}
#[proc_macro_derive(Update)]
pub fn update(input: TokenStream) -> TokenStream {
let item = syn::parse_macro_input!(input as syn::DeriveInput);
match update::update_derive(item) {
Ok(tokens) => tokens.into(),
Err(err) => err.to_compile_error().into(),
}
}
#[proc_macro_derive(DebugWithDb)]
pub fn debug(input: TokenStream) -> TokenStream {
let item = syn::parse_macro_input!(input as syn::DeriveInput);
match debug_with_db::debug_with_db(item) {
Ok(tokens) => tokens.into(),
Err(err) => err.to_compile_error().into(),
}
}

View file

@ -1,19 +0,0 @@
[package]
name = "salsa-2022"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
crossbeam = "0.8.1"
dashmap = "5.3.4"
rustc-hash = "1.1.0"
indexmap = "2"
hashlink = "0.8.0"
arc-swap = "1.6.0"
crossbeam-utils = { version = "0.8", default-features = false }
log = "0.4.5"
parking_lot = "0.12.1"
smallvec = "1.0.0"
salsa-2022-macros = { path = "../salsa-2022-macros" }

View file

@ -1,264 +0,0 @@
use std::{
collections::{HashMap, HashSet},
fmt,
rc::Rc,
sync::Arc,
};
use crate::database::AsSalsaDatabase;
/// `DebugWithDb` is a version of the traditional [`Debug`](`std::fmt::Debug`)
/// trait that gives access to the salsa database, allowing tracked
/// structs to print the values of their fields. It is typically not used
/// directly, instead you should write (e.g.) `format!("{:?}", foo.debug(db))`.
/// Implementations are automatically provided for `#[salsa::tracked]`
/// items, though you can opt-out from that if you wish to provide a manual
/// implementation.
///
/// # WARNING: Intended for debug use only!
///
/// Debug print-outs of tracked structs include the value of all their fields,
/// but the reads of those fields are ignored by salsa. This avoids creating
/// spurious dependencies from debugging code, but if you use the resulting
/// string to influence the outputs (return value, accumulators, etc) from your
/// query, salsa's dependency tracking will be undermined.
///
/// If for some reason you *want* to incorporate dependency output into
/// your query, do not use the `debug` or `into_debug` helpers and instead
/// invoke `fmt` manually.
pub trait DebugWithDb<Db: ?Sized + AsSalsaDatabase> {
/// Creates a wrapper type that implements `Debug` but which
/// uses the `DebugWithDb::fmt`.
///
/// # WARNING: Intended for debug use only!
///
/// The wrapper type Debug impl will access the value of all
/// fields but those accesses are ignored by salsa. This is only
/// suitable for debug output. See [`DebugWithDb`][] trait comment
/// for more details.
fn debug<'me, 'db>(&'me self, db: &'me Db) -> DebugWith<'me, Db>
where
Self: Sized + 'me,
{
DebugWith {
value: BoxRef::Ref(self),
db,
}
}
/// Creates a wrapper type that implements `Debug` but which
/// uses the `DebugWithDb::fmt`.
///
/// # WARNING: Intended for debug use only!
///
/// The wrapper type Debug impl will access the value of all
/// fields but those accesses are ignored by salsa. This is only
/// suitable for debug output. See [`DebugWithDb`][] trait comment
/// for more details.
fn into_debug<'me, 'db>(self, db: &'me Db) -> DebugWith<'me, Db>
where
Self: Sized + 'me,
{
DebugWith {
value: BoxRef::Box(Box::new(self)),
db,
}
}
/// Format `self` given the database `db`.
///
/// # Dependency tracking
///
/// When invoked manually, field accesses that occur
/// within this method are tracked by salsa. But when invoked
/// the [`DebugWith`][] value returned by the [`debug`](`Self::debug`)
/// and [`into_debug`][`Self::into_debug`] methods,
/// those accesses are ignored.
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result;
}
/// Helper type for the [`DebugWithDb`][] trait that
/// wraps a value and implements [`std::fmt::Debug`][],
/// redirecting calls to the `fmt` method from [`DebugWithDb`][].
///
/// # WARNING: Intended for debug use only!
///
/// This type intentionally ignores salsa dependencies used
/// to generate the debug output. See the [`DebugWithDb`][] trait
/// for more notes on this.
pub struct DebugWith<'me, Db: ?Sized + AsSalsaDatabase> {
value: BoxRef<'me, dyn DebugWithDb<Db> + 'me>,
db: &'me Db,
}
enum BoxRef<'me, T: ?Sized> {
Box(Box<T>),
Ref(&'me T),
}
impl<T: ?Sized> std::ops::Deref for BoxRef<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match self {
BoxRef::Box(b) => b,
BoxRef::Ref(r) => r,
}
}
}
impl<Db: ?Sized> fmt::Debug for DebugWith<'_, Db>
where
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let db = self.db.as_salsa_database();
db.runtime()
.debug_probe(|| DebugWithDb::fmt(&*self.value, f, self.db))
}
}
impl<Db: ?Sized, T: ?Sized> DebugWithDb<Db> for &T
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T: ?Sized> DebugWithDb<Db> for Box<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T> DebugWithDb<Db> for Rc<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T: ?Sized> DebugWithDb<Db> for Arc<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T> DebugWithDb<Db> for Vec<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let elements = self.iter().map(|e| e.debug(db));
f.debug_list().entries(elements).finish()
}
}
impl<Db: ?Sized, T> DebugWithDb<Db> for Option<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let me = self.as_ref().map(|v| v.debug(db));
fmt::Debug::fmt(&me, f)
}
}
impl<Db: ?Sized, K, V, S> DebugWithDb<Db> for HashMap<K, V, S>
where
K: DebugWithDb<Db>,
V: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let elements = self.iter().map(|(k, v)| (k.debug(db), v.debug(db)));
f.debug_map().entries(elements).finish()
}
}
impl<Db: ?Sized, A, B> DebugWithDb<Db> for (A, B)
where
A: DebugWithDb<Db>,
B: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
f.debug_tuple("")
.field(&self.0.debug(db))
.field(&self.1.debug(db))
.finish()
}
}
impl<Db: ?Sized, A, B, C> DebugWithDb<Db> for (A, B, C)
where
A: DebugWithDb<Db>,
B: DebugWithDb<Db>,
C: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
f.debug_tuple("")
.field(&self.0.debug(db))
.field(&self.1.debug(db))
.field(&self.2.debug(db))
.finish()
}
}
impl<Db: ?Sized, V, S> DebugWithDb<Db> for HashSet<V, S>
where
V: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let elements = self.iter().map(|e| e.debug(db));
f.debug_list().entries(elements).finish()
}
}
/// This is used by the macro generated code.
/// If the field type implements `DebugWithDb`, uses that, otherwise, uses `Debug`.
/// That's the "has impl" trick (https://github.com/nvzqz/impls#how-it-works)
#[doc(hidden)]
pub mod helper {
use super::{AsSalsaDatabase, DebugWith, DebugWithDb};
use std::{fmt, marker::PhantomData};
pub trait Fallback<T: fmt::Debug, Db: ?Sized> {
fn salsa_debug<'a>(a: &'a T, _db: &Db) -> &'a dyn fmt::Debug {
a
}
}
impl<Everything, Db: ?Sized, T: fmt::Debug> Fallback<T, Db> for Everything {}
pub struct SalsaDebug<T, Db: ?Sized>(PhantomData<T>, PhantomData<Db>);
impl<T, Db: ?Sized> SalsaDebug<T, Db>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
#[allow(dead_code)]
pub fn salsa_debug<'a>(a: &'a T, db: &'a Db) -> DebugWith<'a, Db> {
a.debug(db)
}
}
}

View file

@ -1,49 +0,0 @@
/// Describes how likely a value is to change -- how "durable" it is.
/// By default, inputs have `Durability::LOW` and interned values have
/// `Durability::HIGH`. But inputs can be explicitly set with other
/// durabilities.
///
/// We use durabilities to optimize the work of "revalidating" a query
/// after some input has changed. Ordinarily, in a new revision,
/// queries have to trace all their inputs back to the base inputs to
/// determine if any of those inputs have changed. But if we know that
/// the only changes were to inputs of low durability (the common
/// case), and we know that the query only used inputs of medium
/// durability or higher, then we can skip that enumeration.
///
/// Typically, one assigns low durabilities to inputs that the user is
/// frequently editing. Medium or high durabilities are used for
/// configuration, the source from library crates, or other things
/// that are unlikely to be edited.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Durability(u8);
impl Durability {
/// Low durability: things that change frequently.
///
/// Example: part of the crate being edited
pub const LOW: Durability = Durability(0);
/// Medium durability: things that change sometimes, but rarely.
///
/// Example: a Cargo.toml file
pub const MEDIUM: Durability = Durability(1);
/// High durability: things that are not expected to change under
/// common usage.
///
/// Example: the standard library or something from crates.io
pub const HIGH: Durability = Durability(2);
/// The maximum possible durability; equivalent to HIGH but
/// "conceptually" distinct (i.e., if we add more durability
/// levels, this could change).
pub(crate) const MAX: Durability = Self::HIGH;
/// Number of durability levels.
pub(crate) const LEN: usize = 3;
pub(crate) fn index(self) -> usize {
self.0 as usize
}
}

View file

@ -1,12 +0,0 @@
use std::hash::{BuildHasher, Hash};
pub(crate) type FxHasher = std::hash::BuildHasherDefault<rustc_hash::FxHasher>;
pub(crate) type FxIndexSet<K> = indexmap::IndexSet<K, FxHasher>;
pub(crate) type FxIndexMap<K, V> = indexmap::IndexMap<K, V, FxHasher>;
pub(crate) type FxDashMap<K, V> = dashmap::DashMap<K, V, FxHasher>;
pub(crate) type FxLinkedHashSet<K> = hashlink::LinkedHashSet<K, FxHasher>;
pub(crate) type FxHashSet<K> = std::collections::HashSet<K, FxHasher>;
pub(crate) fn hash<T: Hash>(t: &T) -> u64 {
FxHasher::default().hash_one(t)
}

View file

@ -1,134 +0,0 @@
use std::{
fmt,
sync::atomic::{AtomicU32, Ordering},
};
use crate::{
cycle::CycleRecoveryStrategy,
id::FromId,
ingredient::{fmt_index, Ingredient, IngredientRequiresReset},
key::{DatabaseKeyIndex, DependencyIndex},
runtime::{local_state::QueryOrigin, Runtime},
IngredientIndex, Revision,
};
pub trait InputId: FromId {}
impl<T: FromId> InputId for T {}
pub struct InputIngredient<Id>
where
Id: InputId,
{
ingredient_index: IngredientIndex,
counter: AtomicU32,
debug_name: &'static str,
_phantom: std::marker::PhantomData<Id>,
}
impl<Id> InputIngredient<Id>
where
Id: InputId,
{
pub fn new(index: IngredientIndex, debug_name: &'static str) -> Self {
Self {
ingredient_index: index,
counter: Default::default(),
debug_name,
_phantom: std::marker::PhantomData,
}
}
pub fn database_key_index(&self, id: Id) -> DatabaseKeyIndex {
DatabaseKeyIndex {
ingredient_index: self.ingredient_index,
key_index: id.as_id(),
}
}
pub fn new_input(&self, _runtime: &Runtime) -> Id {
let next_id = self.counter.fetch_add(1, Ordering::Relaxed);
Id::from_id(crate::Id::from_u32(next_id))
}
pub fn new_singleton_input(&self, _runtime: &Runtime) -> Id {
// When one already exists, panic.
if self.counter.load(Ordering::Relaxed) >= 1 {
panic!("singleton struct may not be duplicated");
}
// Record that the singleton now exists.
self.counter.store(1, Ordering::Relaxed);
Id::from_id(crate::Id::from_u32(0))
}
pub fn get_singleton_input(&self, _runtime: &Runtime) -> Option<Id> {
(self.counter.load(Ordering::Relaxed) > 0).then(|| Id::from_id(crate::Id::from_u32(0)))
}
}
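// Usage sketch for the singleton methods above (hypothetical
// `#[salsa::input(singleton)]` struct `Config`; illustrative only):
//
//     let cfg = Config::new(&db, ...);  // ok: counter goes 0 -> 1
//     let dup = Config::new(&db, ...);  // panics: "singleton struct may not be duplicated"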
impl<DB: ?Sized, Id> Ingredient<DB> for InputIngredient<Id>
where
Id: InputId,
{
fn ingredient_index(&self) -> IngredientIndex {
self.ingredient_index
}
fn maybe_changed_after(&self, _db: &DB, _input: DependencyIndex, _revision: Revision) -> bool {
// Input ingredients are just a counter; they store no data and are immortal.
// Their *fields* are stored in function ingredients elsewhere.
false
}
fn cycle_recovery_strategy(&self) -> CycleRecoveryStrategy {
CycleRecoveryStrategy::Panic
}
fn origin(&self, _key_index: crate::Id) -> Option<QueryOrigin> {
None
}
fn mark_validated_output(
&self,
_db: &DB,
executor: DatabaseKeyIndex,
output_key: Option<crate::Id>,
) {
unreachable!(
"mark_validated_output({:?}, {:?}): input cannot be the output of a tracked function",
executor, output_key
);
}
fn remove_stale_output(
&self,
_db: &DB,
executor: DatabaseKeyIndex,
stale_output_key: Option<crate::Id>,
) {
unreachable!(
"remove_stale_output({:?}, {:?}): input cannot be the output of a tracked function",
executor, stale_output_key
);
}
fn reset_for_new_revision(&mut self) {
panic!("unexpected call to `reset_for_new_revision`")
}
fn salsa_struct_deleted(&self, _db: &DB, _id: crate::Id) {
panic!(
"unexpected call: input ingredients do not register for salsa struct deletion events"
);
}
fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt_index(self.debug_name, index, fmt)
}
}
impl<Id> IngredientRequiresReset for InputIngredient<Id>
where
Id: InputId,
{
const RESET_ON_NEW_REVISION: bool = false;
}

View file

@ -1,304 +0,0 @@
use crossbeam::atomic::AtomicCell;
use std::fmt;
use std::hash::Hash;
use std::marker::PhantomData;
use std::ptr::NonNull;
use crate::alloc::Alloc;
use crate::durability::Durability;
use crate::id::{AsId, LookupId};
use crate::ingredient::{fmt_index, IngredientRequiresReset};
use crate::key::DependencyIndex;
use crate::runtime::local_state::QueryOrigin;
use crate::runtime::Runtime;
use crate::{DatabaseKeyIndex, Id};
use super::hash::FxDashMap;
use super::ingredient::Ingredient;
use super::routes::IngredientIndex;
use super::Revision;
pub trait Configuration: Sized {
type Data<'db>: InternedData;
type Struct<'db>: Copy;
/// Create an end-user struct from the underlying raw pointer.
///
/// This call is an "end-step" to the tracked struct lookup/creation
/// process in a given revision: it occurs only when the struct is newly
/// created or, if a struct is being reused, after we have updated its
/// fields (or confirmed it is green and no updates are required).
///
/// # Safety
///
/// Requires that `ptr` represents a "confirmed" value in this revision,
/// which means that it will remain valid and immutable for the remainder of this
/// revision, represented by the lifetime `'db`.
unsafe fn struct_from_raw<'db>(ptr: NonNull<ValueStruct<Self>>) -> Self::Struct<'db>;
/// Deref the struct to yield the underlying value struct.
/// Since we are still part of the `'db` lifetime in which the struct was created,
/// this deref is safe, and the value-struct fields are immutable and verified.
fn deref_struct(s: Self::Struct<'_>) -> &ValueStruct<Self>;
}
pub trait InternedData: Sized + Eq + Hash + Clone {}
impl<T: Eq + Hash + Clone> InternedData for T {}
/// The interned ingredient has the job of hashing values of type `Data` to produce an `Id`.
/// It is used to store interned structs and also the id fields of a tracked struct.
/// Interned values endure until they are explicitly removed in some way.
pub struct InternedIngredient<C: Configuration> {
/// Index of this ingredient in the database (used to construct database-ids, etc).
ingredient_index: IngredientIndex,
/// Maps from data to the existing interned id for that data.
///
/// Deadlock requirement: We access `value_map` while holding a lock on `key_map`, but not vice versa.
key_map: FxDashMap<C::Data<'static>, Id>,
/// Maps from an interned id to its data.
///
/// Deadlock requirement: We access `value_map` while holding a lock on `key_map`, but not vice versa.
value_map: FxDashMap<Id, Alloc<ValueStruct<C>>>,
/// Counter for the next id.
counter: AtomicCell<u32>,
/// Stores the revision when this interned ingredient was last cleared.
/// You can clear an interned table at any point, deleting all its entries,
/// but that will make anything dependent on those entries dirty and in need
/// of being recomputed.
reset_at: Revision,
debug_name: &'static str,
}
/// Struct storing the interned fields.
pub struct ValueStruct<C>
where
C: Configuration,
{
id: Id,
fields: C::Data<'static>,
}
impl<C> InternedIngredient<C>
where
C: Configuration,
{
pub fn new(ingredient_index: IngredientIndex, debug_name: &'static str) -> Self {
Self {
ingredient_index,
key_map: Default::default(),
value_map: Default::default(),
counter: AtomicCell::default(),
reset_at: Revision::start(),
debug_name,
}
}
unsafe fn to_internal_data<'db>(&'db self, data: C::Data<'db>) -> C::Data<'static> {
unsafe { std::mem::transmute(data) }
}
pub fn intern_id<'db>(&'db self, runtime: &'db Runtime, data: C::Data<'db>) -> crate::Id {
C::deref_struct(self.intern(runtime, data)).as_id()
}
/// Intern data to a unique reference.
pub fn intern<'db>(&'db self, runtime: &'db Runtime, data: C::Data<'db>) -> C::Struct<'db> {
runtime.report_tracked_read(
DependencyIndex::for_table(self.ingredient_index),
Durability::MAX,
self.reset_at,
);
// Optimisation: only take a read lock on the map if the data has already
// been interned.
let internal_data = unsafe { self.to_internal_data(data) };
if let Some(guard) = self.key_map.get(&internal_data) {
let id = *guard;
drop(guard);
return self.interned_value(id);
}
match self.key_map.entry(internal_data.clone()) {
// Data has been interned by a racing call; use that ID instead.
dashmap::mapref::entry::Entry::Occupied(entry) => {
let id = *entry.get();
drop(entry);
self.interned_value(id)
}
// We won any races, so we should intern the data.
dashmap::mapref::entry::Entry::Vacant(entry) => {
let next_id = self.counter.fetch_add(1);
let next_id = crate::id::Id::from_u32(next_id);
let value = self
.value_map
.entry(next_id)
.or_insert(Alloc::new(ValueStruct {
id: next_id,
fields: internal_data,
}));
let value_raw = value.as_raw();
drop(value);
entry.insert(next_id);
// SAFETY: Items are only removed from the `value_map` with an `&mut self` reference.
unsafe { C::struct_from_raw(value_raw) }
}
}
}
pub fn interned_value(&self, id: Id) -> C::Struct<'_> {
let r = self.value_map.get(&id).unwrap();
// SAFETY: Items are only removed from the `value_map` with an `&mut self` reference.
unsafe { C::struct_from_raw(r.as_raw()) }
}
/// Look up the data for an interned value based on its id.
/// Rarely used since end-users generally carry a struct with a pointer directly
/// to the interned item.
pub fn data(&self, id: Id) -> &C::Data<'_> {
C::deref_struct(self.interned_value(id)).data()
}
/// Variant of `data` that takes an (unnecessary) database argument.
/// This exists because tracked functions sometimes use true interning and sometimes use
/// [`IdentityInterner`][], which requires the database argument.
pub fn data_with_db<'db, DB: ?Sized>(&'db self, id: Id, _db: &'db DB) -> &'db C::Data<'db> {
self.data(id)
}
pub fn reset(&mut self, revision: Revision) {
assert!(revision > self.reset_at);
self.reset_at = revision;
self.key_map.clear();
self.value_map.clear();
}
}
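// A minimal standalone sketch (hypothetical helper, not salsa API) of the
// two-phase interning pattern used by `intern` above: a cheap read on the
// fast path, then `entry` to resolve insertion races between threads.
#[allow(dead_code)]
fn intern_str(
    map: &dashmap::DashMap<String, u32>,
    counter: &std::sync::atomic::AtomicU32,
    text: &str,
) -> u32 {
    // Fast path: the value was already interned by someone.
    if let Some(id) = map.get(text) {
        return *id;
    }
    match map.entry(text.to_owned()) {
        // A racing thread interned it between our check and now; reuse its id.
        dashmap::mapref::entry::Entry::Occupied(entry) => *entry.get(),
        // We won the race: allocate a fresh id and publish it.
        dashmap::mapref::entry::Entry::Vacant(entry) => {
            let id = counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            entry.insert(id);
            id
        }
    }
}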
impl<DB: ?Sized, C> Ingredient<DB> for InternedIngredient<C>
where
C: Configuration,
{
fn ingredient_index(&self) -> IngredientIndex {
self.ingredient_index
}
fn maybe_changed_after(&self, _db: &DB, _input: DependencyIndex, revision: Revision) -> bool {
revision < self.reset_at
}
fn cycle_recovery_strategy(&self) -> crate::cycle::CycleRecoveryStrategy {
crate::cycle::CycleRecoveryStrategy::Panic
}
fn origin(&self, _key_index: crate::Id) -> Option<QueryOrigin> {
None
}
fn mark_validated_output(
&self,
_db: &DB,
executor: DatabaseKeyIndex,
output_key: Option<crate::Id>,
) {
unreachable!(
"mark_validated_output({:?}, {:?}): input cannot be the output of a tracked function",
executor, output_key
);
}
fn remove_stale_output(
&self,
_db: &DB,
executor: DatabaseKeyIndex,
stale_output_key: Option<crate::Id>,
) {
unreachable!(
"remove_stale_output({:?}, {:?}): interned ids are not outputs",
executor, stale_output_key
);
}
fn reset_for_new_revision(&mut self) {
// Interned ingredients do not, normally, get deleted except when they are "reset" en masse.
// There ARE methods (e.g., `clear_deleted_entries` and `remove`) for deleting individual
// items, but those are only used for tracked struct ingredients.
panic!("unexpected call to `reset_for_new_revision`")
}
fn salsa_struct_deleted(&self, _db: &DB, _id: crate::Id) {
panic!("unexpected call: interned ingredients do not register for salsa struct deletion events");
}
fn fmt_index(&self, index: Option<crate::Id>, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt_index(self.debug_name, index, fmt)
}
}
impl<C> IngredientRequiresReset for InternedIngredient<C>
where
C: Configuration,
{
const RESET_ON_NEW_REVISION: bool = false;
}
pub struct IdentityInterner<C>
where
C: Configuration,
{
data: PhantomData<C>,
}
impl<C> IdentityInterner<C>
where
C: Configuration,
{
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
IdentityInterner { data: PhantomData }
}
pub fn intern_id<'db>(&'db self, _runtime: &'db Runtime, id: C::Data<'db>) -> crate::Id
where
C::Data<'db>: AsId,
{
id.as_id()
}
pub fn data_with_db<'db, DB>(&'db self, id: crate::Id, db: &'db DB) -> C::Data<'db>
where
DB: ?Sized,
C::Data<'db>: LookupId<&'db DB>,
{
<C::Data<'db>>::lookup_id(id, db)
}
}
impl<C> ValueStruct<C>
where
C: Configuration,
{
pub fn data(&self) -> &C::Data<'_> {
// SAFETY: The lifetime of `self` is tied to the interning ingredient;
// we never remove data without an `&mut self` access to the interning ingredient.
unsafe { self.to_self_ref(&self.fields) }
}
unsafe fn to_self_ref<'db>(&'db self, fields: &'db C::Data<'static>) -> &'db C::Data<'db> {
unsafe { std::mem::transmute(fields) }
}
}
impl<C> AsId for ValueStruct<C>
where
C: Configuration,
{
fn as_id(&self) -> Id {
self.id
}
}

View file

@ -1,53 +0,0 @@
pub mod accumulator;
mod alloc;
pub mod cancelled;
pub mod cycle;
pub mod database;
pub mod debug;
pub mod durability;
pub mod event;
pub mod function;
pub mod hash;
pub mod id;
pub mod ingredient;
pub mod ingredient_list;
pub mod input;
pub mod input_field;
pub mod interned;
pub mod jar;
pub mod key;
pub mod plumbing;
pub mod revision;
pub mod routes;
pub mod runtime;
pub mod salsa_struct;
pub mod setter;
pub mod storage;
pub mod tracked_struct;
pub mod update;
pub use self::cancelled::Cancelled;
pub use self::cycle::Cycle;
pub use self::database::Database;
pub use self::database::ParallelDatabase;
pub use self::database::Snapshot;
pub use self::debug::DebugWith;
pub use self::debug::DebugWithDb;
pub use self::durability::Durability;
pub use self::event::Event;
pub use self::event::EventKind;
pub use self::id::Id;
pub use self::key::DatabaseKeyIndex;
pub use self::revision::Revision;
pub use self::routes::IngredientIndex;
pub use self::runtime::Runtime;
pub use self::storage::DbWithJar;
pub use self::storage::Storage;
pub use salsa_2022_macros::accumulator;
pub use salsa_2022_macros::db;
pub use salsa_2022_macros::input;
pub use salsa_2022_macros::interned;
pub use salsa_2022_macros::jar;
pub use salsa_2022_macros::tracked;
pub use salsa_2022_macros::DebugWithDb;
pub use salsa_2022_macros::Update;

View file

@ -1,37 +0,0 @@
use std::{alloc, ptr};
use crate::storage::HasJars;
/// Initializes the `DB`'s jars in-place
///
/// # Safety
///
/// `init` must fully initialize all of the jars' fields
pub unsafe fn create_jars_inplace<DB: HasJars>(init: impl FnOnce(*mut DB::Jars)) -> Box<DB::Jars> {
let layout = alloc::Layout::new::<DB::Jars>();
if layout.size() == 0 {
// SAFETY: This is the recommended way of creating a Box
// to a ZST in the std docs
unsafe { Box::from_raw(ptr::NonNull::dangling().as_ptr()) }
} else {
// SAFETY: We've checked that the size isn't 0
let place = unsafe { alloc::alloc_zeroed(layout) };
let place = place.cast::<DB::Jars>();
init(place);
// SAFETY: Caller invariant requires that `init` must've
// initialized all of the fields
unsafe { Box::from_raw(place) }
}
}
// Returns `u` but with the lifetime of `t`.
//
// Safe if you know that data at `u` will remain shared
// until the reference `t` expires.
#[allow(clippy::needless_lifetimes)]
pub(crate) unsafe fn transmute_lifetime<'t, 'u, T, U>(_t: &'t T, u: &'u U) -> &'t U {
std::mem::transmute(u)
}

View file

@ -1,63 +0,0 @@
use std::num::NonZeroUsize;
use std::sync::atomic::{AtomicUsize, Ordering};
/// Value of the initial revision, as a usize. We don't use 0
/// because we want to use a `NonZeroUsize`.
const START: usize = 1;
/// A unique identifier for the current version of the database; each
/// time an input is changed, the revision number is incremented.
/// `Revision` is used internally to track which values may need to be
/// recomputed, but is not something you should have to interact with
/// directly as a user of salsa.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Revision {
generation: NonZeroUsize,
}
impl Revision {
pub(crate) fn start() -> Self {
Self::from(START)
}
pub(crate) fn from(g: usize) -> Self {
Self {
generation: NonZeroUsize::new(g).unwrap(),
}
}
pub(crate) fn next(self) -> Revision {
Self::from(self.generation.get() + 1)
}
fn as_usize(self) -> usize {
self.generation.get()
}
}
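// A quick sanity sketch of the arithmetic above (hypothetical test, not
// part of the original file):
#[cfg(test)]
mod revision_sketch {
    use super::Revision;

    #[test]
    fn next_increments_generation() {
        let r1 = Revision::start();
        let r2 = r1.next();
        assert!(r2 > r1);
        assert_eq!(format!("{:?}", r2), "R2");
    }
}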
impl std::fmt::Debug for Revision {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(fmt, "R{}", self.generation)
}
}
#[derive(Debug)]
pub(crate) struct AtomicRevision {
data: AtomicUsize,
}
impl AtomicRevision {
pub(crate) fn start() -> Self {
Self {
data: AtomicUsize::new(START),
}
}
pub(crate) fn load(&self) -> Revision {
Revision::from(self.data.load(Ordering::SeqCst))
}
pub(crate) fn store(&self, r: Revision) {
self.data.store(r.as_usize(), Ordering::SeqCst);
}
}

View file

@ -1,463 +0,0 @@
use std::{
panic::panic_any,
sync::{atomic::Ordering, Arc},
};
use crate::{
cycle::CycleRecoveryStrategy,
debug::DebugWithDb,
durability::Durability,
key::{DatabaseKeyIndex, DependencyIndex},
runtime::active_query::ActiveQuery,
Cancelled, Cycle, Database, Event, EventKind, Revision,
};
use self::{
dependency_graph::DependencyGraph,
local_state::{ActiveQueryGuard, EdgeKind},
};
use super::{tracked_struct::Disambiguator, IngredientIndex};
mod active_query;
mod dependency_graph;
pub mod local_state;
mod shared_state;
pub struct Runtime {
/// Our unique runtime id.
id: RuntimeId,
/// Local state that is specific to this runtime (thread).
local_state: local_state::LocalState,
/// Shared state that is accessible via all runtimes.
shared_state: Arc<shared_state::SharedState>,
}
#[derive(Clone, Debug)]
pub(crate) enum WaitResult {
Completed,
Panicked,
Cycle(Cycle),
}
/// A unique identifier for a particular runtime. Each time you create
/// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is
/// complete, its `RuntimeId` may potentially be re-used.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct RuntimeId {
counter: usize,
}
#[derive(Clone, Debug)]
pub(crate) struct StampedValue<V> {
pub(crate) value: V,
pub(crate) durability: Durability,
pub(crate) changed_at: Revision,
}
impl<V> StampedValue<V> {
// FIXME: Use or remove this.
#[allow(dead_code)]
pub(crate) fn merge_revision_info<U>(&mut self, other: &StampedValue<U>) {
self.durability = self.durability.min(other.durability);
self.changed_at = self.changed_at.max(other.changed_at);
}
}
impl Default for Runtime {
fn default() -> Self {
Runtime {
id: RuntimeId { counter: 0 },
shared_state: Default::default(),
local_state: Default::default(),
}
}
}
impl std::fmt::Debug for Runtime {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("Runtime")
.field("id", &self.id())
.field("shared_state", &self.shared_state)
.finish()
}
}
impl Runtime {
pub(crate) fn id(&self) -> RuntimeId {
self.id
}
pub(crate) fn current_revision(&self) -> Revision {
self.shared_state.revisions[0].load()
}
/// Returns the index of the active query along with its *current* durability/changed-at
/// information. As the query continues to execute, naturally, that information may change.
pub(crate) fn active_query(&self) -> Option<(DatabaseKeyIndex, StampedValue<()>)> {
self.local_state.active_query()
}
pub(crate) fn empty_dependencies(&self) -> Arc<[(EdgeKind, DependencyIndex)]> {
self.shared_state.empty_dependencies.clone()
}
/// Executes `op` but ignores its effect on
/// the query dependencies; intended for use
/// by `DebugWithDb` only.
///
/// # Danger: intended for debugging only
///
/// This operation is intended for **debugging only**.
/// Misuse will cause Salsa to give incorrect results.
/// The expectation is that the type `R` produced will be
/// logged or printed out. **The type `R` that is produced
/// should not affect the result or other outputs
/// (such as accumulators) from the current Salsa query.**
pub fn debug_probe<R>(&self, op: impl FnOnce() -> R) -> R {
self.local_state.debug_probe(op)
}
pub fn snapshot(&self) -> Self {
if self.local_state.query_in_progress() {
panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)");
}
let id = RuntimeId {
counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst),
};
Runtime {
id,
shared_state: self.shared_state.clone(),
local_state: Default::default(),
}
}
pub(crate) fn report_tracked_read(
&self,
key_index: DependencyIndex,
durability: Durability,
changed_at: Revision,
) {
self.local_state
.report_tracked_read(key_index, durability, changed_at)
}
/// Reports that the query depends on some state unknown to salsa.
///
/// Queries which report untracked reads will be re-executed in the next
/// revision.
pub fn report_untracked_read(&self) {
self.local_state
.report_untracked_read(self.current_revision());
}
/// Reports that an input with durability `durability` changed.
/// This will update the 'last changed at' values for every durability
/// less than or equal to `durability` to the current revision.
pub(crate) fn report_tracked_write(&mut self, durability: Durability) {
let new_revision = self.current_revision();
for rev in &self.shared_state.revisions[1..=durability.index()] {
rev.store(new_revision);
}
}
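// Worked example of the slot layout assumed above: `revisions[0]` always
// holds the current revision, so `last_changed_revision(LOW)` never lags.
// A write to a MEDIUM input stores the new revision into `revisions[1]`
// only, leaving `revisions[2]` (HIGH) at its older value; a HIGH write
// updates both slots 1 and 2.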
/// Adds `key` to the list of outputs created by the current query
/// (if not already present).
pub(crate) fn add_output(&self, key: DependencyIndex) {
self.local_state.add_output(key);
}
/// Check whether `entity` is contained in the list of outputs written by the current query.
pub(super) fn is_output_of_active_query(&self, entity: DependencyIndex) -> bool {
self.local_state.is_output(entity)
}
/// Called when the active query creates an index from the
/// entity table with the index `entity_index`. Has the following effects:
///
/// * Add a query read on `DatabaseKeyIndex::for_table(entity_index)`
/// * Identify a unique disambiguator for the hash within the current query,
/// adding the hash to the current query's disambiguator table.
/// * Returns a tuple of:
/// * the id of the current query
/// * the current dependencies (durability, changed_at) of current query
/// * the disambiguator index
pub(crate) fn disambiguate_entity(
&self,
entity_index: IngredientIndex,
reset_at: Revision,
data_hash: u64,
) -> (DatabaseKeyIndex, StampedValue<()>, Disambiguator) {
self.report_tracked_read(
DependencyIndex::for_table(entity_index),
Durability::MAX,
reset_at,
);
self.local_state.disambiguate(data_hash)
}
/// The revision in which values with durability `d` may have last
/// changed. For D0, this is just the current revision. But for
/// higher levels of durability, this value may lag behind the
/// current revision. If we encounter a value of durability Di,
/// then, we can check this function to get a "bound" on when the
/// value may have changed, which allows us to skip walking its
/// dependencies.
#[inline]
pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
self.shared_state.revisions[d.index()].load()
}
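// Example of the skip: a memoized value verified at R8 whose inputs all
// have durability HIGH can be revalidated in a later revision R12 without
// walking its dependencies, provided `last_changed_revision(HIGH) <= R8`.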
/// Starts unwinding the stack if the current revision is cancelled.
///
/// This method can be called by query implementations that perform
/// potentially expensive computations, in order to speed up propagation of
/// cancellation.
///
/// Cancellation will automatically be triggered by salsa on any query
/// invocation.
///
/// This method should not be overridden by `Database` implementors. A
/// `salsa_event` is emitted when this method is called, so that should be
/// used instead.
pub(crate) fn unwind_if_revision_cancelled<DB: ?Sized + Database>(&self, db: &DB) {
db.salsa_event(Event {
runtime_id: self.id(),
kind: EventKind::WillCheckCancellation,
});
if self.shared_state.revision_canceled.load() {
db.salsa_event(Event {
runtime_id: self.id(),
kind: EventKind::WillCheckCancellation,
});
self.unwind_cancelled();
}
}
#[cold]
pub(crate) fn unwind_cancelled(&self) {
self.report_untracked_read();
Cancelled::PendingWrite.throw();
}
pub(crate) fn set_cancellation_flag(&self) {
self.shared_state.revision_canceled.store(true);
}
/// Increments the "current revision" counter and clears
/// the cancellation flag.
///
/// This should only be done by the storage when the state is "quiescent".
pub(crate) fn new_revision(&mut self) -> Revision {
let r_old = self.current_revision();
let r_new = r_old.next();
self.shared_state.revisions[0].store(r_new);
self.shared_state.revision_canceled.store(false);
r_new
}
#[inline]
pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
self.local_state.push_query(database_key_index)
}
/// Block until `other_id` completes executing `database_key`;
/// panic or unwind in the case of a cycle.
///
/// `query_mutex_guard` is the guard for the current query's state;
/// it will be dropped after we have successfully registered the
/// dependency.
///
/// # Propagating panics
///
/// If the thread `other_id` panics, then our thread is considered
/// cancelled, so this function will panic with a `Cancelled` value.
///
/// # Cycle handling
///
/// If the thread `other_id` already depends on the current thread,
/// and hence there is a cycle in the query graph, then this function
/// will unwind instead of returning normally. The method of unwinding
/// depends on the [`Self::mutual_cycle_recovery_strategy`]
/// of the cycle participants:
///
/// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value.
/// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`].
pub(crate) fn block_on_or_unwind<QueryMutexGuard>(
&self,
db: &dyn Database,
database_key: DatabaseKeyIndex,
other_id: RuntimeId,
query_mutex_guard: QueryMutexGuard,
) {
let mut dg = self.shared_state.dependency_graph.lock();
if dg.depends_on(other_id, self.id()) {
self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id);
// If the above fn returns, then (via cycle recovery) it has unblocked the
// cycle, so we can continue.
assert!(!dg.depends_on(other_id, self.id()));
}
db.salsa_event(Event {
runtime_id: self.id(),
kind: EventKind::WillBlockOn {
other_runtime_id: other_id,
database_key,
},
});
let stack = self.local_state.take_query_stack();
let (stack, result) = DependencyGraph::block_on(
dg,
self.id(),
database_key,
other_id,
stack,
query_mutex_guard,
);
self.local_state.restore_query_stack(stack);
match result {
WaitResult::Completed => (),
// If the other thread panicked, then we consider this thread
// cancelled. The assumption is that the panic will be detected
// by the other thread and responded to appropriately.
WaitResult::Panicked => Cancelled::PropagatedPanic.throw(),
WaitResult::Cycle(c) => c.throw(),
}
}
/// Handles a cycle in the dependency graph that was detected when the
/// current thread tried to block on `database_key_index` which is being
/// executed by `to_id`. If this function returns, then `to_id` no longer
/// depends on the current thread, and so we should continue executing
/// as normal. Otherwise, the function will throw a `Cycle` which is expected
/// to be caught by some frame on our stack. This occurs either if there is
/// a frame on our stack with cycle recovery (possibly the top one!) or if there
/// is no cycle recovery at all.
fn unblock_cycle_and_maybe_throw(
&self,
db: &dyn Database,
dg: &mut DependencyGraph,
database_key_index: DatabaseKeyIndex,
to_id: RuntimeId,
) {
log::debug!(
"unblock_cycle_and_maybe_throw(database_key={:?})",
database_key_index
);
let mut from_stack = self.local_state.take_query_stack();
let from_id = self.id();
// Make a "dummy stack frame". As we iterate through the cycle, we will collect the
// inputs from each participant. Then, if we are participating in cycle recovery, we
// will propagate those results to all participants.
let mut cycle_query = ActiveQuery::new(database_key_index);
// Identify the cycle participants:
let cycle = {
let mut v = vec![];
dg.for_each_cycle_participant(
from_id,
&mut from_stack,
database_key_index,
to_id,
|aqs| {
aqs.iter_mut().for_each(|aq| {
cycle_query.add_from(aq);
v.push(aq.database_key_index);
});
},
);
// We want to report the participants in a deterministic order
// (at least for this execution, not necessarily across executions),
// no matter where the cycle started on the stack. Find the minimum
// key and rotate it to the front.
let min = v.iter().min().unwrap();
let index = v.iter().position(|p| p == min).unwrap();
v.rotate_left(index);
// No need to store extra memory.
v.shrink_to_fit();
Cycle::new(Arc::new(v))
};
log::debug!(
"cycle {:?}, cycle_query {:#?}",
cycle.debug(db),
cycle_query,
);
// We can remove the cycle participants from the list of dependencies;
// they are a strongly connected component (SCC) and we only care about
// dependencies to things outside the SCC that control whether it will
// form again.
cycle_query.remove_cycle_participants(&cycle);
// Mark each cycle participant that has recovery set, along with
// any frames that come after them on the same thread. Those frames
// are going to be unwound so that fallback can occur.
dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| {
aqs.iter_mut()
.skip_while(|aq| {
match db.cycle_recovery_strategy(aq.database_key_index.ingredient_index) {
CycleRecoveryStrategy::Panic => true,
CycleRecoveryStrategy::Fallback => false,
}
})
.for_each(|aq| {
log::debug!("marking {:?} for fallback", aq.database_key_index.debug(db));
aq.take_inputs_from(&cycle_query);
assert!(aq.cycle.is_none());
aq.cycle = Some(cycle.clone());
});
});
// Unblock every thread that has cycle recovery with a `WaitResult::Cycle`.
// They will throw the cycle, which will be caught by the frame that has
// cycle recovery so that it can execute that recovery.
let (me_recovered, others_recovered) =
dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id);
self.local_state.restore_query_stack(from_stack);
if me_recovered {
// If the current thread has recovery, we want to throw
// so that it can begin.
cycle.throw()
} else if others_recovered {
// If other threads have recovery but we didn't: return and we will block on them.
} else {
// If nobody has recovery, then we panic.
panic_any(cycle);
}
}
/// Invoked when this runtime completed computing `database_key` with
/// the given result `wait_result` (`wait_result` should be `None` if
/// computing `database_key` panicked and could not complete).
/// This function unblocks any dependent queries and allows them
/// to continue executing.
pub(crate) fn unblock_queries_blocked_on(
&self,
database_key: DatabaseKeyIndex,
wait_result: WaitResult,
) {
self.shared_state
.dependency_graph
.lock()
.unblock_runtimes_blocked_on(database_key, wait_result);
}
}

View file

@ -1,277 +0,0 @@
use std::sync::Arc;
use crate::key::DatabaseKeyIndex;
use parking_lot::{Condvar, MutexGuard};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use super::{active_query::ActiveQuery, RuntimeId, WaitResult};
type QueryStack = Vec<ActiveQuery>;
#[derive(Debug, Default)]
pub(super) struct DependencyGraph {
/// A `(K -> V)` pair in this map indicates that the runtime
/// `K` is blocked on some query executing in the runtime `V`.
/// This encodes a graph that must be acyclic (or else deadlock
/// will result).
edges: FxHashMap<RuntimeId, Edge>,
/// Encodes the `RuntimeId` that are blocked waiting for the result
/// of a given query.
query_dependents: FxHashMap<DatabaseKeyIndex, SmallVec<[RuntimeId; 4]>>,
/// When a key K completes which had dependent queries Qs blocked on it,
/// it stores its `WaitResult` here. As they wake up, each query Q in Qs will
/// come here to fetch its result.
wait_results: FxHashMap<RuntimeId, (QueryStack, WaitResult)>,
}
#[derive(Debug)]
struct Edge {
blocked_on_id: RuntimeId,
blocked_on_key: DatabaseKeyIndex,
stack: QueryStack,
/// Signalled whenever a query with dependents completes.
/// Allows those dependents to check if they are ready to unblock.
condvar: Arc<parking_lot::Condvar>,
}
impl DependencyGraph {
/// True if `from_id` depends on `to_id`.
///
/// (i.e., there is a path from `from_id` to `to_id` in the graph.)
pub(super) fn depends_on(&mut self, from_id: RuntimeId, to_id: RuntimeId) -> bool {
let mut p = from_id;
while let Some(q) = self.edges.get(&p).map(|edge| edge.blocked_on_id) {
if q == to_id {
return true;
}
p = q;
}
p == to_id
}
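// Example: with edges {A -> B, B -> C}, `depends_on(A, C)` follows
// A => B => C and returns true, while `depends_on(C, A)` finds no
// outgoing edge from C and returns false (since C != A).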
/// Invokes `closure` with a `&mut ActiveQuery` for each query that participates in the cycle.
/// The cycle runs as follows:
///
/// 1. The runtime `from_id`, which has the stack `from_stack`, would like to invoke `database_key`...
/// 2. ...but `database_key` is already being executed by `to_id`...
/// 3. ...and `to_id` is transitively dependent on something which is present on `from_stack`.
pub(super) fn for_each_cycle_participant(
&mut self,
from_id: RuntimeId,
from_stack: &mut QueryStack,
database_key: DatabaseKeyIndex,
to_id: RuntimeId,
mut closure: impl FnMut(&mut [ActiveQuery]),
) {
debug_assert!(self.depends_on(to_id, from_id));
// To understand this algorithm, consider this [drawing](https://is.gd/TGLI9v):
//
// database_key = QB2
// from_id = A
// to_id = B
// from_stack = [QA1, QA2, QA3]
//
// self.edges[B] = { C, QC2, [QB1..QB3] }
// self.edges[C] = { A, QA2, [QC1..QC3] }
//
// The cyclic
// edge we have
// failed to add.
// :
// A : B C
// :
// QA1 v QB1 QC1
// ┌► QA2 ┌──► QB2 ┌─► QC2
// │ QA3 ───┘ QB3 ──┘ QC3 ───┐
// │ │
// └───────────────────────────────┘
//
// Final output: [QB2, QB3, QC2, QC3, QA2, QA3]
let mut id = to_id;
let mut key = database_key;
while id != from_id {
// Looking at the diagram above, the idea is to
// take the edge from `to_id` starting at `key`
// (inclusive) and down to the end. We can then
// load up the next thread (i.e., we start at B/QB2,
// and then load up the dependency on C/QC2).
let edge = self.edges.get_mut(&id).unwrap();
let prefix = edge
.stack
.iter_mut()
.take_while(|p| p.database_key_index != key)
.count();
closure(&mut edge.stack[prefix..]);
id = edge.blocked_on_id;
key = edge.blocked_on_key;
}
// Finally, we copy in the results from `from_stack`.
let prefix = from_stack
.iter_mut()
.take_while(|p| p.database_key_index != key)
.count();
closure(&mut from_stack[prefix..]);
}
/// Unblock each blocked runtime (excluding the current one) if some
/// query executing in that runtime is participating in cycle fallback.
///
/// Returns a pair of booleans (Current, Others) where:
/// * Current is true if the current runtime has cycle participants
/// with fallback;
/// * Others is true if other runtimes were unblocked.
pub(super) fn maybe_unblock_runtimes_in_cycle(
&mut self,
from_id: RuntimeId,
from_stack: &QueryStack,
database_key: DatabaseKeyIndex,
to_id: RuntimeId,
) -> (bool, bool) {
// See diagram in `for_each_cycle_participant`.
let mut id = to_id;
let mut key = database_key;
let mut others_unblocked = false;
while id != from_id {
let edge = self.edges.get(&id).unwrap();
let prefix = edge
.stack
.iter()
.take_while(|p| p.database_key_index != key)
.count();
let next_id = edge.blocked_on_id;
let next_key = edge.blocked_on_key;
if let Some(cycle) = edge.stack[prefix..]
.iter()
.rev()
.find_map(|aq| aq.cycle.clone())
{
// Remove `id` from the list of runtimes blocked on `next_key`:
self.query_dependents
.get_mut(&next_key)
.unwrap()
.retain(|r| *r != id);
// Unblock runtime so that it can resume execution once lock is released:
self.unblock_runtime(id, WaitResult::Cycle(cycle));
others_unblocked = true;
}
id = next_id;
key = next_key;
}
let prefix = from_stack
.iter()
.take_while(|p| p.database_key_index != key)
.count();
let this_unblocked = from_stack[prefix..].iter().any(|aq| aq.cycle.is_some());
(this_unblocked, others_unblocked)
}
/// Modifies the graph so that `from_id` is blocked
/// on `database_key`, which is being computed by
/// `to_id`.
///
/// For this to be reasonable, the lock on the
/// results table for `database_key` must be held.
/// This ensures that computing `database_key` doesn't
/// complete before `block_on` executes.
///
/// Preconditions:
/// * No path from `to_id` to `from_id`
/// (i.e., `me.depends_on(to_id, from_id)` is false)
/// * `held_mutex` is a read lock (or stronger) on `database_key`
pub(super) fn block_on<QueryMutexGuard>(
mut me: MutexGuard<'_, Self>,
from_id: RuntimeId,
database_key: DatabaseKeyIndex,
to_id: RuntimeId,
from_stack: QueryStack,
query_mutex_guard: QueryMutexGuard,
) -> (QueryStack, WaitResult) {
let condvar = me.add_edge(from_id, database_key, to_id, from_stack);
// Release the mutex that prevents `database_key`
// from completing, now that the edge has been added.
drop(query_mutex_guard);
loop {
if let Some(stack_and_result) = me.wait_results.remove(&from_id) {
debug_assert!(!me.edges.contains_key(&from_id));
return stack_and_result;
}
condvar.wait(&mut me);
}
}
/// Helper for `block_on`: performs actual graph modification
/// to add a dependency edge from `from_id` to `to_id`, which is
/// computing `database_key`.
fn add_edge(
&mut self,
from_id: RuntimeId,
database_key: DatabaseKeyIndex,
to_id: RuntimeId,
from_stack: QueryStack,
) -> Arc<parking_lot::Condvar> {
assert_ne!(from_id, to_id);
debug_assert!(!self.edges.contains_key(&from_id));
debug_assert!(!self.depends_on(to_id, from_id));
let condvar = Arc::new(Condvar::new());
self.edges.insert(
from_id,
Edge {
blocked_on_id: to_id,
blocked_on_key: database_key,
stack: from_stack,
condvar: condvar.clone(),
},
);
self.query_dependents
.entry(database_key)
.or_default()
.push(from_id);
condvar
}
/// Invoked when runtime `to_id` completes executing
/// `database_key`.
pub(super) fn unblock_runtimes_blocked_on(
&mut self,
database_key: DatabaseKeyIndex,
wait_result: WaitResult,
) {
let dependents = self
.query_dependents
.remove(&database_key)
.unwrap_or_default();
for from_id in dependents {
self.unblock_runtime(from_id, wait_result.clone());
}
}
/// Unblock the runtime with the given id with the given wait-result.
/// This will cause it resume execution (though it will have to grab
/// the lock on this data structure first, to recover the wait result).
fn unblock_runtime(&mut self, id: RuntimeId, wait_result: WaitResult) {
let edge = self.edges.remove(&id).expect("not blocked");
self.wait_results.insert(id, (edge.stack, wait_result));
// Now that we have inserted the `wait_results`,
// notify the thread.
edge.condvar.notify_one();
}
}
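// A generic standalone sketch (hypothetical helper, not salsa API) of the
// condvar protocol in `block_on` above: sleep while the result is absent,
// re-checking after every wakeup so spurious wakeups are harmless.
#[allow(dead_code)]
fn wait_for_result<K, R>(
    mut results: parking_lot::MutexGuard<'_, rustc_hash::FxHashMap<K, R>>,
    key: &K,
    condvar: &parking_lot::Condvar,
) -> R
where
    K: std::hash::Hash + Eq,
{
    loop {
        if let Some(result) = results.remove(key) {
            return result;
        }
        // Atomically releases the lock and parks this thread; the lock is
        // reacquired before `wait` returns.
        condvar.wait(&mut results);
    }
}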

View file

@ -1,394 +0,0 @@
use log::debug;
use crate::durability::Durability;
use crate::key::DatabaseKeyIndex;
use crate::key::DependencyIndex;
use crate::runtime::Revision;
use crate::tracked_struct::Disambiguator;
use crate::Cycle;
use crate::Runtime;
use std::cell::RefCell;
use std::sync::Arc;
use super::active_query::ActiveQuery;
use super::StampedValue;
/// State that is specific to a single execution thread.
///
/// Internally, this type uses ref-cells.
///
/// **Note also that all mutations to the database handle (and hence
/// to the local-state) must be undone during unwinding.**
pub(super) struct LocalState {
/// Vector of active queries.
///
/// This is normally `Some`, but it is set to `None`
/// while the query is blocked waiting for a result.
///
/// Unwinding note: pushes onto this vector must be popped -- even
/// during unwinding.
query_stack: RefCell<Option<Vec<ActiveQuery>>>,
}
/// Summarizes "all the inputs that a query used"
#[derive(Debug, Clone)]
pub(crate) struct QueryRevisions {
/// The most recent revision in which some input changed.
pub(crate) changed_at: Revision,
/// Minimum durability of the inputs to this query.
pub(crate) durability: Durability,
/// How was this query computed?
pub(crate) origin: QueryOrigin,
}
impl QueryRevisions {
pub(crate) fn stamped_value<V>(&self, value: V) -> StampedValue<V> {
StampedValue {
value,
durability: self.durability,
changed_at: self.changed_at,
}
}
}
/// Tracks the way that a memoized value for a query was created.
#[derive(Debug, Clone)]
pub enum QueryOrigin {
/// The value was assigned as the output of another query (e.g., using `specify`).
/// The `DatabaseKeyIndex` is the identity of the assigning query.
Assigned(DatabaseKeyIndex),
/// This value was set as a base input to the computation.
BaseInput,
/// The value was derived by executing a function
/// and we were able to track ALL of that function's inputs.
/// Those inputs are described in [`QueryEdges`].
Derived(QueryEdges),
/// The value was derived by executing a function
/// but that function also reported that it read untracked inputs.
/// The [`QueryEdges`] argument contains a listing of all the inputs we saw
/// (but we know there were more).
DerivedUntracked(QueryEdges),
}
impl QueryOrigin {
/// Indices for queries *written* by this query (empty if its value was assigned).
pub(crate) fn outputs(&self) -> impl Iterator<Item = DependencyIndex> + '_ {
let opt_edges = match self {
QueryOrigin::Derived(edges) | QueryOrigin::DerivedUntracked(edges) => Some(edges),
QueryOrigin::Assigned(_) | QueryOrigin::BaseInput => None,
};
opt_edges.into_iter().flat_map(|edges| edges.outputs())
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum EdgeKind {
Input,
Output,
}
/// The edges between a memoized value and other queries in the dependency graph.
/// These edges include both dependency edges
/// (e.g., when creating the memoized value for Q0 we executed another function Q1)
/// and output edges
/// (e.g., when Q0 specified the value for another query Q2).
#[derive(Debug, Clone)]
pub struct QueryEdges {
/// The list of outgoing edges from this node.
/// This list combines *both* inputs and outputs.
///
/// Note that we always track input dependencies even when there are untracked reads.
/// Untracked reads mean that we can't verify values, so we don't use the list of inputs for that,
/// but we still use it for finding the transitive inputs to an accumulator.
///
/// You can access the input/output list via the methods [`inputs`] and [`outputs`] respectively.
///
/// Important:
///
/// * The inputs must be in **execution order** for the red-green algorithm to work.
pub input_outputs: Arc<[(EdgeKind, DependencyIndex)]>,
}
impl QueryEdges {
/// Returns the (tracked) inputs that were executed in computing this memoized value.
///
/// These will always be in execution order.
pub(crate) fn inputs(&self) -> impl Iterator<Item = DependencyIndex> + '_ {
self.input_outputs
.iter()
.filter(|(edge_kind, _)| *edge_kind == EdgeKind::Input)
.map(|(_, dependency_index)| *dependency_index)
}
/// Returns the (tracked) outputs that were written in computing this memoized value.
///
/// These will always be in execution order.
pub(crate) fn outputs(&self) -> impl Iterator<Item = DependencyIndex> + '_ {
self.input_outputs
.iter()
.filter(|(edge_kind, _)| *edge_kind == EdgeKind::Output)
.map(|(_, dependency_index)| *dependency_index)
}
/// Creates a new `QueryEdges`; the values given for each field must meet struct invariants.
pub(crate) fn new(input_outputs: Arc<[(EdgeKind, DependencyIndex)]>) -> Self {
Self { input_outputs }
}
}
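// Example: for `input_outputs = [(Input, a), (Output, b), (Input, c)]`,
// `inputs()` yields `a, c` (in execution order) and `outputs()` yields `b`.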
impl Default for LocalState {
fn default() -> Self {
LocalState {
query_stack: RefCell::new(Some(Vec::new())),
}
}
}
impl LocalState {
#[inline]
pub(super) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
let mut query_stack = self.query_stack.borrow_mut();
let query_stack = query_stack.as_mut().expect("local stack taken");
query_stack.push(ActiveQuery::new(database_key_index));
ActiveQueryGuard {
local_state: self,
database_key_index,
push_len: query_stack.len(),
}
}
fn with_query_stack<R>(&self, c: impl FnOnce(&mut Vec<ActiveQuery>) -> R) -> R {
c(self
.query_stack
.borrow_mut()
.as_mut()
.expect("query stack taken"))
}
pub(super) fn query_in_progress(&self) -> bool {
self.with_query_stack(|stack| !stack.is_empty())
}
/// Dangerous operation: executes `op` but ignores its effect on
/// the query dependencies. Useful for debugging statements, but
/// otherwise not to be toyed with!
pub(super) fn debug_probe<R>(&self, op: impl FnOnce() -> R) -> R {
let saved_state: Option<_> =
self.with_query_stack(|stack| Some(stack.last()?.save_query_state()));
let result = op();
if let Some(saved_state) = saved_state {
self.with_query_stack(|stack| {
let active_query = stack.last_mut().expect("query stack not empty");
active_query.restore_query_state(saved_state);
});
}
result
}
/// Returns the index of the active query along with its *current* durability/changed-at
/// information. As the query continues to execute, naturally, that information may change.
pub(super) fn active_query(&self) -> Option<(DatabaseKeyIndex, StampedValue<()>)> {
self.with_query_stack(|stack| {
stack.last().map(|active_query| {
(
active_query.database_key_index,
StampedValue {
value: (),
durability: active_query.durability,
changed_at: active_query.changed_at,
},
)
})
})
}
pub(super) fn add_output(&self, entity: DependencyIndex) {
self.with_query_stack(|stack| {
if let Some(top_query) = stack.last_mut() {
top_query.add_output(entity)
}
})
}
pub(super) fn is_output(&self, entity: DependencyIndex) -> bool {
self.with_query_stack(|stack| {
if let Some(top_query) = stack.last_mut() {
top_query.is_output(entity)
} else {
false
}
})
}
pub(super) fn report_tracked_read(
&self,
input: DependencyIndex,
durability: Durability,
changed_at: Revision,
) {
debug!(
"report_query_read_and_unwind_if_cycle_resulted(input={:?}, durability={:?}, changed_at={:?})",
input, durability, changed_at
);
self.with_query_stack(|stack| {
if let Some(top_query) = stack.last_mut() {
top_query.add_read(input, durability, changed_at);
// We are a cycle participant:
//
// C0 --> ... --> Ci --> Ci+1 -> ... -> Cn --> C0
// ^ ^
// : |
// This edge -----+ |
// |
// |
// N0
//
// In this case, the value we have just read from `Ci+1`
// is actually the cycle fallback value and not especially
// interesting. We unwind now with `CycleParticipant` to avoid
// executing the rest of our query function. This unwinding
// will be caught and our own fallback value will be used.
//
// Note that `Ci+1` may have *other* callers who are not
// participants in the cycle (e.g., N0 in the graph above).
// They will not have the `cycle` marker set in their
// stack frames, so they will just read the fallback value
// from `Ci+1` and continue on their merry way.
if let Some(cycle) = &top_query.cycle {
cycle.clone().throw()
}
}
})
}
pub(super) fn report_untracked_read(&self, current_revision: Revision) {
self.with_query_stack(|stack| {
if let Some(top_query) = stack.last_mut() {
top_query.add_untracked_read(current_revision);
}
})
}
/// Update the top query on the stack to act as though it read a value
/// of durability `durability` which changed in `revision`.
// FIXME: Use or remove this.
#[allow(dead_code)]
pub(super) fn report_synthetic_read(&self, durability: Durability, revision: Revision) {
self.with_query_stack(|stack| {
if let Some(top_query) = stack.last_mut() {
top_query.add_synthetic_read(durability, revision);
}
})
}
/// Takes the query stack and returns it. This is used when
/// the current thread is blocking. The stack must be restored
/// with [`Self::restore_query_stack`] when the thread unblocks.
pub(super) fn take_query_stack(&self) -> Vec<ActiveQuery> {
assert!(
self.query_stack.borrow().is_some(),
"query stack already taken"
);
self.query_stack.take().unwrap()
}
/// Restores a query stack taken with [`Self::take_query_stack`] once
/// the thread unblocks.
pub(super) fn restore_query_stack(&self, stack: Vec<ActiveQuery>) {
assert!(self.query_stack.borrow().is_none(), "query stack not taken");
self.query_stack.replace(Some(stack));
}
#[track_caller]
pub(crate) fn disambiguate(
&self,
data_hash: u64,
) -> (DatabaseKeyIndex, StampedValue<()>, Disambiguator) {
assert!(
self.query_in_progress(),
"cannot create a tracked struct disambiguator outside of a tracked function"
);
self.with_query_stack(|stack| {
let top_query = stack.last_mut().unwrap();
let disambiguator = top_query.disambiguate(data_hash);
(
top_query.database_key_index,
StampedValue {
value: (),
durability: top_query.durability,
changed_at: top_query.changed_at,
},
disambiguator,
)
})
}
}
impl std::panic::RefUnwindSafe for LocalState {}
/// When a query is pushed onto the `active_query` stack, this guard
/// is returned to represent its slot. The guard can be used to pop
/// the query from the stack -- in the case of unwinding, the guard's
/// destructor will also remove the query.
pub(crate) struct ActiveQueryGuard<'me> {
local_state: &'me LocalState,
push_len: usize,
pub(crate) database_key_index: DatabaseKeyIndex,
}
impl ActiveQueryGuard<'_> {
fn pop_helper(&self) -> ActiveQuery {
self.local_state.with_query_stack(|stack| {
// Sanity check: pushes and pops should be balanced.
assert_eq!(stack.len(), self.push_len);
debug_assert_eq!(
stack.last().unwrap().database_key_index,
self.database_key_index
);
stack.pop().unwrap()
})
}
/// Invoked when the query has successfully completed execution.
pub(super) fn complete(self) -> ActiveQuery {
let query = self.pop_helper();
std::mem::forget(self);
query
}
/// Pops an active query from the stack. Returns the [`QueryRevisions`]
/// which summarizes the other queries that were accessed during this
/// query's execution.
#[inline]
pub(crate) fn pop(self, runtime: &Runtime) -> QueryRevisions {
// Extract accumulated inputs.
let popped_query = self.complete();
// If this frame were a cycle participant, it would have unwound.
assert!(popped_query.cycle.is_none());
popped_query.revisions(runtime)
}
/// If the active query is registered as a cycle participant, remove and
/// return that cycle.
pub(crate) fn take_cycle(&self) -> Option<Cycle> {
self.local_state
.with_query_stack(|stack| stack.last_mut()?.cycle.take())
}
}
impl Drop for ActiveQueryGuard<'_> {
fn drop(&mut self) {
self.pop_helper();
}
}

View file

@ -1,274 +0,0 @@
use std::{fmt, sync::Arc};
use parking_lot::Condvar;
use crate::cycle::CycleRecoveryStrategy;
use crate::ingredient::Ingredient;
use crate::jar::Jar;
use crate::key::DependencyIndex;
use crate::runtime::local_state::QueryOrigin;
use crate::runtime::Runtime;
use crate::{Database, DatabaseKeyIndex, Id, IngredientIndex};
use super::routes::Routes;
use super::{ParallelDatabase, Revision};
/// The "storage" struct stores all the data for the jars.
/// It is shared between the main database and any active snapshots.
pub struct Storage<DB: HasJars> {
/// Data shared across all databases. This contains the ingredients needed by each jar.
/// See the ["jars and ingredients" chapter](https://salsa-rs.github.io/salsa/plumbing/jars_and_ingredients.html)
/// for a more detailed description.
shared: Shared<DB>,
/// The "ingredients" structure stores the information about how to find each ingredient in the database.
/// It allows us to take the [`IngredientIndex`] assigned to a particular ingredient
/// and get back a [`dyn Ingredient`][`Ingredient`] for the struct that stores its data.
///
/// This is kept separate from `shared` so that we can clone it and retain `&`-access even when we have `&mut` access to `shared`.
routes: Arc<Routes<DB>>,
/// The runtime for this particular salsa database handle.
/// Each handle gets its own runtime, but the runtimes have shared state between them.
runtime: Runtime,
}
/// Data shared between all threads.
/// This is where the actual data for tracked functions, structs, inputs, etc. lives,
/// along with some coordination variables shared between threads.
struct Shared<DB: HasJars> {
/// Contains the data for each jar in the database.
/// Each jar stores its own structs in there that ultimately contain ingredients
/// (types that implement the [`Ingredient`] trait, like [`crate::function::FunctionIngredient`]).
///
/// Even though these jars are stored in an `Arc`, we sometimes get mutable access to them
/// by using `Arc::get_mut`. This is only possible when all parallel snapshots have been dropped.
jars: Option<Arc<DB::Jars>>,
/// Condition variable that is used to coordinate cancellation.
/// When the main thread writes to the database, it blocks until each of the snapshots can be cancelled.
cvar: Arc<Condvar>,
/// Mutex that is used to protect the `jars` field when waiting for snapshots to be dropped.
noti_lock: Arc<parking_lot::Mutex<()>>,
}
// ANCHOR: default
impl<DB> Default for Storage<DB>
where
DB: HasJars,
{
fn default() -> Self {
let mut routes = Routes::new();
let jars = DB::create_jars(&mut routes);
Self {
shared: Shared {
jars: Some(Arc::from(jars)),
cvar: Arc::new(Default::default()),
noti_lock: Arc::new(parking_lot::Mutex::new(())),
},
routes: Arc::new(routes),
runtime: Runtime::default(),
}
}
}
// ANCHOR_END: default
impl<DB> Storage<DB>
where
DB: HasJars,
{
pub fn snapshot(&self) -> Storage<DB>
where
DB: ParallelDatabase,
{
Self {
shared: self.shared.clone(),
routes: self.routes.clone(),
runtime: self.runtime.snapshot(),
}
}
pub fn jars(&self) -> (&DB::Jars, &Runtime) {
(self.shared.jars.as_ref().unwrap(), &self.runtime)
}
pub fn runtime(&self) -> &Runtime {
&self.runtime
}
pub fn runtime_mut(&mut self) -> &mut Runtime {
self.jars_mut().1
}
// ANCHOR: jars_mut
/// Gets mutable access to the jars. This will trigger a new revision
/// and it will also cancel any ongoing work in the current revision.
/// Any actual writes that occur to data in a jar should use
/// [`Runtime::report_tracked_write`].
pub fn jars_mut(&mut self) -> (&mut DB::Jars, &mut Runtime) {
// Wait for all snapshots to be dropped.
self.cancel_other_workers();
// Increment revision counter.
self.runtime.new_revision();
// Acquire `&mut` access to `self.shared` -- this is only possible because
// the snapshots have all been dropped, so we hold the only handle to the `Arc`.
let jars = Arc::get_mut(self.shared.jars.as_mut().unwrap()).unwrap();
// Inform other ingredients that a new revision has begun.
// This gives them a chance to free resources that were being held until the next revision.
let routes = self.routes.clone();
for route in routes.reset_routes() {
route(jars).reset_for_new_revision();
}
// Return mut ref to jars + runtime.
(jars, &mut self.runtime)
}
// ANCHOR_END: jars_mut
// ANCHOR: cancel_other_workers
/// Sets cancellation flag and blocks until all other workers with access
/// to this storage have completed.
///
/// This could deadlock if there is a single worker with two handles to the
/// same database!
fn cancel_other_workers(&mut self) {
loop {
self.runtime.set_cancellation_flag();
// Acquire lock before we check if we have unique access to the jars.
// If we do not yet have unique access, we will go to sleep and wait for
// the snapshots to be dropped, which will signal the cond var associated
// with this lock.
//
// NB: We have to acquire the lock first to ensure that we can check for
// unique access and go to sleep waiting on the condvar atomically,
// as described in PR #474.
let mut guard = self.shared.noti_lock.lock();
// If we have unique access to the jars, we are done.
if Arc::get_mut(self.shared.jars.as_mut().unwrap()).is_some() {
return;
}
// Otherwise, wait until some other storage entities have dropped.
//
// The cvar `self.shared.cvar` is notified by the `Drop` impl.
self.shared.cvar.wait(&mut guard);
}
}
// ANCHOR_END: cancel_other_workers
pub fn ingredient(&self, ingredient_index: IngredientIndex) -> &dyn Ingredient<DB> {
let route = self.routes.route(ingredient_index);
route(self.shared.jars.as_ref().unwrap())
}
}
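// A minimal standalone sketch (hypothetical helper, not salsa API) of the
// wait loop in `cancel_other_workers` above: take the notification lock
// *before* testing for unique ownership, so the check and the `wait` are
// atomic with respect to the `Drop` impl that notifies the condvar.
#[allow(dead_code)]
fn wait_for_unique_access<T>(
    slot: &mut Option<std::sync::Arc<T>>,
    noti_lock: &parking_lot::Mutex<()>,
    cvar: &parking_lot::Condvar,
) {
    loop {
        let mut guard = noti_lock.lock();
        if std::sync::Arc::get_mut(slot.as_mut().unwrap()).is_some() {
            return; // all other handles have been dropped
        }
        // Sleep until another handle is dropped and notifies us.
        cvar.wait(&mut guard);
    }
}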
impl<DB> Clone for Shared<DB>
where
DB: HasJars,
{
fn clone(&self) -> Self {
Self {
jars: self.jars.clone(),
cvar: self.cvar.clone(),
noti_lock: self.noti_lock.clone(),
}
}
}
impl<DB> Drop for Storage<DB>
where
DB: HasJars,
{
fn drop(&mut self) {
// Drop the Arc reference before the cvar is notified,
// since other threads are sleeping, waiting for it to reach 1.
let _guard = self.shared.noti_lock.lock();
drop(self.shared.jars.take());
self.shared.cvar.notify_all();
}
}
pub trait HasJars: HasJarsDyn + Sized {
type Jars;
fn jars(&self) -> (&Self::Jars, &Runtime);
/// Gets mutable access to the jars. This will trigger a new revision
/// and it will also cancel any ongoing work in the current revision.
fn jars_mut(&mut self) -> (&mut Self::Jars, &mut Runtime);
fn create_jars(routes: &mut Routes<Self>) -> Box<Self::Jars>;
}
pub trait DbWithJar<J>: HasJar<J> + Database {
fn as_jar_db<'db>(&'db self) -> &<J as Jar<'db>>::DynDb
where
J: Jar<'db>;
}
pub trait JarFromJars<J>: HasJars {
fn jar_from_jars(jars: &Self::Jars) -> &J;
fn jar_from_jars_mut(jars: &mut Self::Jars) -> &mut J;
}
pub trait HasJar<J> {
fn jar(&self) -> (&J, &Runtime);
fn jar_mut(&mut self) -> (&mut J, &mut Runtime);
}
// ANCHOR: HasJarsDyn
/// Dyn-friendly subset of `HasJars`.
pub trait HasJarsDyn {
fn runtime(&self) -> &Runtime;
fn runtime_mut(&mut self) -> &mut Runtime;
fn maybe_changed_after(&self, input: DependencyIndex, revision: Revision) -> bool;
fn cycle_recovery_strategy(&self, input: IngredientIndex) -> CycleRecoveryStrategy;
fn origin(&self, input: DatabaseKeyIndex) -> Option<QueryOrigin>;
fn mark_validated_output(&self, executor: DatabaseKeyIndex, output: DependencyIndex);
/// Invoked when `executor` used to output `stale_output` but no longer does.
/// This method routes that into a call to the [`remove_stale_output`](`crate::ingredient::Ingredient::remove_stale_output`)
/// method on the ingredient for `stale_output`.
fn remove_stale_output(&self, executor: DatabaseKeyIndex, stale_output: DependencyIndex);
/// Informs `ingredient` that the salsa struct with id `id` has been deleted.
/// This means that `id` will not be used in this revision and hence
/// any memoized values keyed by that struct can be discarded.
///
/// In order to receive this callback, `ingredient` must have registered itself
/// as a dependent function using
/// [`SalsaStructInDb::register_dependent_fn`](`crate::salsa_struct::SalsaStructInDb::register_dependent_fn`).
fn salsa_struct_deleted(&self, ingredient: IngredientIndex, id: Id);
fn fmt_index(&self, index: DependencyIndex, fmt: &mut fmt::Formatter<'_>) -> fmt::Result;
}
// ANCHOR_END: HasJarsDyn
pub trait HasIngredientsFor<I>
where
I: IngredientsFor,
{
fn ingredient(&self) -> &I::Ingredients;
fn ingredient_mut(&mut self) -> &mut I::Ingredients;
}
pub trait IngredientsFor {
type Jar;
type Ingredients;
fn create_ingredients<DB>(routes: &mut Routes<DB>) -> Self::Ingredients
where
DB: DbWithJar<Self::Jar> + JarFromJars<Self::Jar>;
}
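// Sketch of how these two traits fit together (illustrative; the real
// impls are generated by the `#[salsa::jar]` macro): a jar is a tuple of
// ingredient sets, and `HasIngredientsFor` projects out one member, e.g.
//
//     impl HasIngredientsFor<my_tracked_fn> for MyJar {
//         fn ingredient(&self) -> &<my_tracked_fn as IngredientsFor>::Ingredients {
//             &self.2 // `my_tracked_fn` is the third item declared in the jar
//         }
//         fn ingredient_mut(&mut self) -> &mut <my_tracked_fn as IngredientsFor>::Ingredients {
//             &mut self.2
//         }
//     }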

View file

@ -1,8 +1,8 @@
[package]
name = "salsa-macros"
version = "0.17.0-pre.2"
version = "0.18.0"
authors = ["Salsa developers"]
edition = "2018"
edition = "2021"
license = "Apache-2.0 OR MIT"
repository = "https://github.com/salsa-rs/salsa"
description = "Procedural macros for the salsa crate"
@ -11,7 +11,9 @@ description = "Procedural macros for the salsa crate"
proc-macro = true
[dependencies]
eyre = "0.6.5"
heck = "0.4"
proc-macro2 = "1.0"
quote = "1.0"
syn = { version = "1.0", features = ["full", "extra-traits"] }
syn = { version = "2.0.64", features = ["full", "visit-mut"] }
synstructure = "0.13.1"

View file

@ -1 +0,0 @@
../../LICENSE-APACHE

View file

@ -1 +0,0 @@
../../LICENSE-MIT

View file

@ -1 +0,0 @@
../../README.md

View file

@ -1,256 +0,0 @@
use heck::ToSnakeCase;
use proc_macro::TokenStream;
use syn::parse::{Parse, ParseStream};
use syn::punctuated::Punctuated;
use syn::{Ident, ItemStruct, Path, Token};
type PunctuatedQueryGroups = Punctuated<QueryGroup, Token![,]>;
pub(crate) fn database(args: TokenStream, input: TokenStream) -> TokenStream {
let args = syn::parse_macro_input!(args as QueryGroupList);
let input = syn::parse_macro_input!(input as ItemStruct);
let query_groups = &args.query_groups;
let database_name = &input.ident;
let visibility = &input.vis;
let db_storage_field = quote! { storage };
let mut output = proc_macro2::TokenStream::new();
output.extend(quote! { #input });
let query_group_names_snake: Vec<_> = query_groups
.iter()
.map(|query_group| {
let group_name = query_group.name();
Ident::new(&group_name.to_string().to_snake_case(), group_name.span())
})
.collect();
let query_group_storage_names: Vec<_> = query_groups
.iter()
.map(|QueryGroup { group_path }| {
quote! {
<#group_path as salsa::plumbing::QueryGroup>::GroupStorage
}
})
.collect();
// For each query group `foo::MyGroup` create a link to its
// `foo::MyGroupGroupStorage`
let mut storage_fields = proc_macro2::TokenStream::new();
let mut storage_initializers = proc_macro2::TokenStream::new();
let mut has_group_impls = proc_macro2::TokenStream::new();
for (((query_group, group_name_snake), group_storage), group_index) in query_groups
.iter()
.zip(&query_group_names_snake)
.zip(&query_group_storage_names)
.zip(0_u16..)
{
let group_path = &query_group.group_path;
// rewrite the last identifier (`MyGroup`, above) to
// (e.g.) `MyGroupGroupStorage`.
storage_fields.extend(quote! {
#group_name_snake: #group_storage,
});
// initialize the storage struct field, passing along the group index.
storage_initializers.extend(quote! {
#group_name_snake: #group_storage::new(#group_index),
});
// ANCHOR:HasQueryGroup
has_group_impls.extend(quote! {
impl salsa::plumbing::HasQueryGroup<#group_path> for #database_name {
fn group_storage(&self) -> &#group_storage {
&self.#db_storage_field.query_store().#group_name_snake
}
fn group_storage_mut(&mut self) -> (&#group_storage, &mut salsa::Runtime) {
let (query_store_mut, runtime) = self.#db_storage_field.query_store_mut();
(&query_store_mut.#group_name_snake, runtime)
}
}
});
// ANCHOR_END:HasQueryGroup
}
// create group storage wrapper struct
output.extend(quote! {
#[doc(hidden)]
#visibility struct __SalsaDatabaseStorage {
#storage_fields
}
impl Default for __SalsaDatabaseStorage {
fn default() -> Self {
Self {
#storage_initializers
}
}
}
});
// Create a tuple (D1, D2, ...) where Di is the data for a given query group.
let mut database_data = vec![];
for QueryGroup { group_path } in query_groups {
database_data.push(quote! {
<#group_path as salsa::plumbing::QueryGroup>::GroupData
});
}
// ANCHOR:DatabaseStorageTypes
output.extend(quote! {
impl salsa::plumbing::DatabaseStorageTypes for #database_name {
type DatabaseStorage = __SalsaDatabaseStorage;
}
});
// ANCHOR_END:DatabaseStorageTypes
// ANCHOR:DatabaseOps
let mut fmt_ops = proc_macro2::TokenStream::new();
let mut maybe_changed_ops = proc_macro2::TokenStream::new();
let mut cycle_recovery_strategy_ops = proc_macro2::TokenStream::new();
let mut for_each_ops = proc_macro2::TokenStream::new();
for ((QueryGroup { group_path }, group_storage), group_index) in query_groups
.iter()
.zip(&query_group_storage_names)
.zip(0_u16..)
{
fmt_ops.extend(quote! {
#group_index => {
let storage: &#group_storage =
<Self as salsa::plumbing::HasQueryGroup<#group_path>>::group_storage(self);
storage.fmt_index(self, input, fmt)
}
});
maybe_changed_ops.extend(quote! {
#group_index => {
let storage: &#group_storage =
<Self as salsa::plumbing::HasQueryGroup<#group_path>>::group_storage(self);
storage.maybe_changed_after(self, input, revision)
}
});
cycle_recovery_strategy_ops.extend(quote! {
#group_index => {
let storage: &#group_storage =
<Self as salsa::plumbing::HasQueryGroup<#group_path>>::group_storage(self);
storage.cycle_recovery_strategy(self, input)
}
});
for_each_ops.extend(quote! {
let storage: &#group_storage =
<Self as salsa::plumbing::HasQueryGroup<#group_path>>::group_storage(self);
storage.for_each_query(runtime, &mut op);
});
}
output.extend(quote! {
impl salsa::plumbing::DatabaseOps for #database_name {
fn ops_database(&self) -> &dyn salsa::Database {
self
}
fn ops_salsa_runtime(&self) -> &salsa::Runtime {
self.#db_storage_field.salsa_runtime()
}
fn ops_salsa_runtime_mut(&mut self) -> &mut salsa::Runtime {
self.#db_storage_field.salsa_runtime_mut()
}
fn fmt_index(
&self,
input: salsa::DatabaseKeyIndex,
fmt: &mut std::fmt::Formatter<'_>,
) -> std::fmt::Result {
match input.group_index() {
#fmt_ops
i => panic!("salsa: invalid group index {}", i)
}
}
fn maybe_changed_after(
&self,
input: salsa::DatabaseKeyIndex,
revision: salsa::Revision
) -> bool {
match input.group_index() {
#maybe_changed_ops
i => panic!("salsa: invalid group index {}", i)
}
}
fn cycle_recovery_strategy(
&self,
input: salsa::DatabaseKeyIndex,
) -> salsa::plumbing::CycleRecoveryStrategy {
match input.group_index() {
#cycle_recovery_strategy_ops
i => panic!("salsa: invalid group index {}", i)
}
}
fn for_each_query(
&self,
mut op: &mut dyn FnMut(&dyn salsa::plumbing::QueryStorageMassOps),
) {
let runtime = salsa::Database::salsa_runtime(self);
#for_each_ops
}
}
});
// ANCHOR_END:DatabaseOps
output.extend(has_group_impls);
if std::env::var("SALSA_DUMP").is_ok() {
println!("~~~ database_storage");
println!("{}", output);
println!("~~~ database_storage");
}
output.into()
}
#[derive(Clone, Debug)]
struct QueryGroupList {
query_groups: PunctuatedQueryGroups,
}
impl Parse for QueryGroupList {
fn parse(input: ParseStream) -> syn::Result<Self> {
let query_groups: PunctuatedQueryGroups = input.parse_terminated(QueryGroup::parse)?;
Ok(QueryGroupList { query_groups })
}
}
#[derive(Clone, Debug)]
struct QueryGroup {
group_path: Path,
}
impl QueryGroup {
/// The name of the query group trait.
fn name(&self) -> Ident {
self.group_path.segments.last().unwrap().ident.clone()
}
}
impl Parse for QueryGroup {
/// Parses a single query-group path, e.g.:
///
/// ```ignore
/// HelloWorldStorage
/// ```
fn parse(input: ParseStream) -> syn::Result<Self> {
let group_path: Path = input.parse()?;
Ok(QueryGroup { group_path })
}
}
struct Nothing;
impl Parse for Nothing {
fn parse(_input: ParseStream) -> syn::Result<Self> {
Ok(Nothing)
}
}
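// Example (illustrative): `Nothing` is used to assert that an attribute
// takes no arguments, e.g.
//
//     let _ = syn::parse_macro_input!(args as Nothing);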

View file

@ -9,140 +9,94 @@ extern crate quote;
use proc_macro::TokenStream;
mod database_storage;
mod parenthesized;
mod query_group;
/// The decorator that defines a salsa "query group" trait. This is a
/// trait that defines everything that a block of queries needs to
/// execute, as well as the queries themselves that are exported for
/// others to use.
///
/// This macro declares the "prototype" for a group of queries. It will
/// expand into a trait and a set of structs, one per query.
///
/// For each query, you give the name of the accessor method to invoke
/// the query (e.g., `my_query`, below), as well as its parameter
/// types and the output type. You also give the name for a query type
/// (e.g., `MyQuery`, below) that represents the query, and optionally
/// other details, such as its storage.
///
/// # Examples
///
/// The simplest example is something like this:
///
/// ```ignore
/// #[salsa::query_group]
/// trait TypeckDatabase {
/// #[salsa::input] // see below for other legal attributes
/// fn my_query(&self, input: u32) -> u64;
///
/// /// Queries can have any number of inputs (including zero); if there
/// /// is not exactly one input, then the key type will be
/// /// a tuple of the input types, so in this case `(u32, f32)`.
/// fn other_query(&self, input1: u32, input2: f32) -> u64;
/// }
/// ```
///
/// Here is a list of legal `salsa::XXX` attributes:
///
/// - Storage attributes: control how the query data is stored and set. These
/// are described in detail in the section below.
/// - `#[salsa::input]`
/// - `#[salsa::memoized]`
/// - `#[salsa::dependencies]`
/// - Query execution:
/// - `#[salsa::invoke(path::to::my_fn)]` -- for a non-input, this
/// indicates the function to call when a query must be
/// recomputed. The default is to call a function in the same
/// module with the same name as the query.
/// - `#[salsa::query_type(MyQueryTypeName)]` specifies the name of the
/// dummy struct created for the query. Default is the name of the
/// query, in camel case, plus the word "Query" (e.g.,
/// `MyQueryQuery` and `OtherQueryQuery` in the examples above).
///
/// # Storage attributes
///
/// Here are the possible storage attributes for each query. The default
/// is `#[salsa::memoized]`.
///
/// ## Input queries
///
/// Specifying `#[salsa::input]` will give you an **input
/// query**. Unlike derived queries, whose value is given by a
/// function, input queries are explicitly set by doing
/// `db.query(QueryType).set(key, value)` (where `QueryType` is the
/// `type` specified for the query). Accessing a value that has not
/// yet been set will panic. Each time you invoke `set`, we assume the
/// value has changed, and so we will potentially re-execute derived
/// queries that read (transitively) from this input.
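///
/// For example, with `my_query` from the trait above, the generated
/// setter is used like so (a sketch; `MyDatabaseImpl` is a hypothetical
/// database implementing this group):
///
/// ```ignore
/// let mut db = MyDatabaseImpl::default();
/// db.set_my_query(22, 44);          // set the input for key `22`
/// assert_eq!(db.my_query(22), 44);  // reads now see the stored value
/// ```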
///
/// ## Derived queries
///
/// Derived queries are specified by a function.
///
/// - `#[salsa::memoized]` (the default) -- The result is memoized
/// between calls. If the inputs have changed, we will recompute
/// the value, but then compare against the old memoized value,
/// which can significantly reduce the amount of recomputation
/// required in new revisions. This does require that the value
/// implements `Eq`.
/// - `#[salsa::dependencies]` -- does not cache the value, so it will
/// be recomputed every time it is needed. We do track the inputs, however,
/// so if they have not changed, then things that rely on this query
/// may be known not to have changed.
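///
/// For example, a derived query is declared in the trait without a
/// storage attribute and implemented by a same-named free function
/// (illustrative, building on `TypeckDatabase` above):
///
/// ```ignore
/// // In the trait: fn double(&self, input: u32) -> u64;
/// fn double(db: &dyn TypeckDatabase, input: u32) -> u64 {
///     // Reading `my_query` records the dependency; `double` is memoized
///     // and re-executes only when `my_query(input)` changes.
///     db.my_query(input) * 2
/// }
/// ```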
///
/// ## Attribute combinations
///
/// Some attributes are mutually exclusive. For example, it is an error to add
/// multiple storage specifiers:
///
/// ```compile_fail
/// # use salsa_macros as salsa;
/// #[salsa::query_group]
/// trait CodegenDatabase {
/// #[salsa::input]
/// #[salsa::memoized]
/// fn my_query(&self, input: u32) -> u64;
/// }
/// ```
///
/// It is also an error to annotate a function to `invoke` on an `input` query:
///
/// ```compile_fail
/// # use salsa_macros as salsa;
/// #[salsa::query_group]
/// trait CodegenDatabase {
/// #[salsa::input]
/// #[salsa::invoke(typeck::my_query)]
/// fn my_query(&self, input: u32) -> u64;
/// }
/// ```
#[proc_macro_attribute]
pub fn query_group(args: TokenStream, input: TokenStream) -> TokenStream {
query_group::query_group(args, input)
macro_rules! parse_quote {
($($inp:tt)*) => {
{
let tt = quote!{$($inp)*};
syn::parse2(tt.clone()).unwrap_or_else(|err| {
panic!("failed to parse `{}` at {}:{}:{}: {}", tt, file!(), line!(), column!(), err)
})
}
}
}
/// This attribute is placed on your database struct. It takes a list of the
/// query groups that your database supports. The format looks like so:
///
/// ```rust,ignore
/// #[salsa::database(MyQueryGroup1, MyQueryGroup2)]
/// struct MyDatabase {
/// runtime: salsa::Runtime<MyDatabase>, // <-- your database will need this field, too
/// }
/// ```
///
/// Here, the struct `MyDatabase` would support the two query groups
/// `MyQueryGroup1` and `MyQueryGroup2`. In addition to the `database`
/// attribute, the struct needs to have a `runtime` field (of type
/// [`salsa::Runtime`]) and to implement the `salsa::Database` trait.
///
/// See [the `hello_world` example][hw] for more details.
///
/// [`salsa::Runtime`]: struct.Runtime.html
/// [hw]: https://github.com/salsa-rs/salsa/tree/master/examples/hello_world
#[proc_macro_attribute]
pub fn database(args: TokenStream, input: TokenStream) -> TokenStream {
database_storage::database(args, input)
macro_rules! parse_quote_spanned {
($($inp:tt)*) => {
{
let tt = quote_spanned!{$($inp)*};
syn::parse2(tt.clone()).unwrap_or_else(|err| {
panic!("failed to parse `{}` at {}:{}:{}: {}", tt, file!(), line!(), column!(), err)
})
}
}
}
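// Example use of the two helpers above (illustrative; `quote!` and
// `quote_spanned!` are in scope crate-wide): they behave like
// `syn::parse_quote!`, but panic with the offending token stream and its
// source location, which makes debugging generated code much easier.
//
//     let ty: syn::Type = parse_quote!(std::option::Option<#inner>);
//     let expr: syn::Expr = parse_quote_spanned!(span=> #receiver.clone());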
/// Converts a single `Ident` to a `Literal`: useful when a `&'static str` is needed.
pub(crate) fn literal(ident: &proc_macro2::Ident) -> proc_macro2::Literal {
proc_macro2::Literal::string(&ident.to_string())
}
mod accumulator;
mod configuration;
mod db;
mod db_lifetime;
mod debug;
mod debug_with_db;
mod input;
mod interned;
mod jar;
mod options;
mod salsa_struct;
mod tracked;
mod tracked_fn;
mod tracked_struct;
mod update;
mod xform;
#[proc_macro_attribute]
pub fn accumulator(args: TokenStream, input: TokenStream) -> TokenStream {
accumulator::accumulator(args, input)
}
#[proc_macro_attribute]
pub fn jar(args: TokenStream, input: TokenStream) -> TokenStream {
jar::jar(args, input)
}
#[proc_macro_attribute]
pub fn db(args: TokenStream, input: TokenStream) -> TokenStream {
db::db(args, input)
}
#[proc_macro_attribute]
pub fn interned(args: TokenStream, input: TokenStream) -> TokenStream {
interned::interned(args, input)
}
#[proc_macro_attribute]
pub fn input(args: TokenStream, input: TokenStream) -> TokenStream {
input::input(args, input)
}
#[proc_macro_attribute]
pub fn tracked(args: TokenStream, input: TokenStream) -> TokenStream {
tracked::tracked(args, input)
}
#[proc_macro_derive(Update)]
pub fn update(input: TokenStream) -> TokenStream {
let item = syn::parse_macro_input!(input as syn::DeriveInput);
match update::update_derive(item) {
Ok(tokens) => tokens.into(),
Err(err) => err.to_compile_error().into(),
}
}
#[proc_macro_derive(DebugWithDb)]
pub fn debug(input: TokenStream) -> TokenStream {
let item = syn::parse_macro_input!(input as syn::DeriveInput);
match debug_with_db::debug_with_db(item) {
Ok(tokens) => tokens.into(),
Err(err) => err.to_compile_error().into(),
}
}

View file

@ -1,12 +0,0 @@
pub(crate) struct Parenthesized<T>(pub T);
impl<T> syn::parse::Parse for Parenthesized<T>
where
T: syn::parse::Parse,
{
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let content;
syn::parenthesized!(content in input);
content.parse::<T>().map(Parenthesized)
}
}
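// Example (as used by the removed `query_group` macro): an attribute like
// `#[salsa::invoke(path::to::fn)]` reaches the parser as the token stream
// `(path::to::fn)`, and unwrapping yields the inner `syn::Path`:
//
//     let invoke = parse_macro_input!(tts as Parenthesized<syn::Path>).0;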

View file

@ -1,767 +0,0 @@
use std::convert::TryFrom;
use crate::parenthesized::Parenthesized;
use heck::ToUpperCamelCase;
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::ToTokens;
use syn::{
parse_macro_input, parse_quote, spanned::Spanned, Attribute, Error, FnArg, Ident, ItemTrait,
ReturnType, TraitItem, Type,
};
/// Implementation of the `#[salsa::query_group]` decorator.
pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream {
let group_struct = parse_macro_input!(args as Ident);
let input: ItemTrait = parse_macro_input!(input as ItemTrait);
// println!("args: {:#?}", args);
// println!("input: {:#?}", input);
let input_span = input.span();
let (trait_attrs, salsa_attrs) = filter_attrs(input.attrs);
if !salsa_attrs.is_empty() {
return Error::new(
input_span,
format!("unsupported attributes: {:?}", salsa_attrs),
)
.to_compile_error()
.into();
}
let trait_vis = input.vis;
let trait_name = input.ident;
let _generics = input.generics.clone();
let dyn_db = quote! { dyn #trait_name };
// Decompose the trait into the corresponding queries.
let mut queries = vec![];
for item in input.items {
if let TraitItem::Method(method) = item {
let query_name = method.sig.ident.to_string();
let mut storage = QueryStorage::Memoized;
let mut cycle = None;
let mut invoke = None;
let mut query_type =
format_ident!("{}Query", query_name.to_string().to_upper_camel_case());
let mut num_storages = 0;
// Extract attributes.
let (attrs, salsa_attrs) = filter_attrs(method.attrs);
for SalsaAttr { name, tts, span } in salsa_attrs {
match name.as_str() {
"memoized" => {
storage = QueryStorage::Memoized;
num_storages += 1;
}
"dependencies" => {
storage = QueryStorage::Dependencies;
num_storages += 1;
}
"input" => {
storage = QueryStorage::Input;
num_storages += 1;
}
"interned" => {
storage = QueryStorage::Interned;
num_storages += 1;
}
"cycle" => {
cycle = Some(parse_macro_input!(tts as Parenthesized<syn::Path>).0);
}
"invoke" => {
invoke = Some(parse_macro_input!(tts as Parenthesized<syn::Path>).0);
}
"query_type" => {
query_type = parse_macro_input!(tts as Parenthesized<Ident>).0;
}
"transparent" => {
storage = QueryStorage::Transparent;
num_storages += 1;
}
_ => {
return Error::new(span, format!("unknown salsa attribute `{}`", name))
.to_compile_error()
.into();
}
}
}
let sig_span = method.sig.span();
// Check attribute combinations.
if num_storages > 1 {
return Error::new(sig_span, "multiple storage attributes specified")
.to_compile_error()
.into();
}
match &invoke {
Some(invoke) if storage == QueryStorage::Input => {
return Error::new(
invoke.span(),
"#[salsa::invoke] cannot be set on #[salsa::input] queries",
)
.to_compile_error()
.into();
}
_ => {}
}
// Extract keys.
let mut iter = method.sig.inputs.iter();
let self_receiver = match iter.next() {
Some(FnArg::Receiver(sr)) if sr.mutability.is_none() => sr,
_ => {
return Error::new(
sig_span,
format!("first argument of query `{}` must be `&self`", query_name),
)
.to_compile_error()
.into();
}
};
let mut keys: Vec<(Ident, Type)> = vec![];
for (idx, arg) in iter.enumerate() {
match arg {
FnArg::Typed(syn::PatType { pat, ty, .. }) => keys.push((
match pat.as_ref() {
syn::Pat::Ident(ident_pat) => ident_pat.ident.clone(),
_ => format_ident!("key{}", idx),
},
Type::clone(ty),
)),
arg => {
return Error::new(
arg.span(),
format!("unsupported argument `{:?}` of `{}`", arg, query_name,),
)
.to_compile_error()
.into();
}
}
}
// Extract value.
let value = match method.sig.output {
ReturnType::Type(_, ref ty) => ty.as_ref().clone(),
ref ret => {
return Error::new(
ret.span(),
format!("unsupported return type `{:?}` of `{}`", ret, query_name),
)
.to_compile_error()
.into();
}
};
// For `#[salsa::interned]` keys, we create a "lookup key" automatically.
//
// For a query like:
//
// fn foo(&self, x: Key1, y: Key2) -> u32
//
// we would create
//
// fn lookup_foo(&self, x: u32) -> (Key1, Key2)
let lookup_query = if let QueryStorage::Interned = storage {
let lookup_query_type = format_ident!(
"{}LookupQuery",
query_name.to_string().to_upper_camel_case()
);
let lookup_fn_name = format_ident!("lookup_{}", query_name);
let keys = keys.iter().map(|(_, ty)| ty);
let lookup_value: Type = parse_quote!((#(#keys),*));
let lookup_keys = vec![(parse_quote! { key }, value.clone())];
Some(Query {
query_type: lookup_query_type,
query_name: format!("{}", lookup_fn_name),
fn_name: lookup_fn_name,
receiver: self_receiver.clone(),
attrs: vec![], // FIXME -- some automatically generated docs on this method?
storage: QueryStorage::InternedLookup {
intern_query_type: query_type.clone(),
},
keys: lookup_keys,
value: lookup_value,
invoke: None,
cycle: cycle.clone(),
})
} else {
None
};
queries.push(Query {
query_type,
query_name,
fn_name: method.sig.ident,
receiver: self_receiver.clone(),
attrs,
storage,
keys,
value,
invoke,
cycle,
});
queries.extend(lookup_query);
}
}
let group_storage = format_ident!("{}GroupStorage__", trait_name, span = Span::call_site());
let mut query_fn_declarations = proc_macro2::TokenStream::new();
let mut query_fn_definitions = proc_macro2::TokenStream::new();
let mut storage_fields = proc_macro2::TokenStream::new();
let mut queries_with_storage = vec![];
for query in &queries {
let (key_names, keys): (Vec<_>, Vec<_>) =
query.keys.iter().map(|(pat, ty)| (pat, ty)).unzip();
let value = &query.value;
let fn_name = &query.fn_name;
let qt = &query.query_type;
let attrs = &query.attrs;
let self_receiver = &query.receiver;
query_fn_declarations.extend(quote! {
#(#attrs)*
fn #fn_name(#self_receiver, #(#key_names: #keys),*) -> #value;
});
// Special case: transparent queries don't create actual storage,
// just inline the definition
if let QueryStorage::Transparent = query.storage {
let invoke = query.invoke_tt();
query_fn_definitions.extend(quote! {
fn #fn_name(&self, #(#key_names: #keys),*) -> #value {
#invoke(self, #(#key_names),*)
}
});
continue;
}
queries_with_storage.push(fn_name);
query_fn_definitions.extend(quote! {
fn #fn_name(&self, #(#key_names: #keys),*) -> #value {
// Create a shim to force the code to be monomorphized in the
// query crate. Our experiments revealed that this makes a big
// difference in total compilation time in rust-analyzer, though
// it's not totally obvious why that should be.
fn __shim(db: &(dyn #trait_name + '_), #(#key_names: #keys),*) -> #value {
salsa::plumbing::get_query_table::<#qt>(db).get((#(#key_names),*))
}
__shim(self, #(#key_names),*)
}
});
// For input queries, we need `set_foo` etc
if let QueryStorage::Input = query.storage {
let set_fn_name = format_ident!("set_{}", fn_name);
let set_with_durability_fn_name = format_ident!("set_{}_with_durability", fn_name);
let remove_fn_name = format_ident!("remove_{}", fn_name);
let set_fn_docs = format!(
"
Set the value of the `{fn_name}` input.
See `{fn_name}` for details.
*Note:* Setting values will trigger cancellation
of any ongoing queries; this method blocks until
those queries have been cancelled.
",
fn_name = fn_name
);
let set_constant_fn_docs = format!(
"
Set the value of the `{fn_name}` input with a
specific durability instead of the default of
`Durability::LOW`. You can use `Durability::MAX`
to promise that its value will never change again.
See `{fn_name}` for details.
*Note:* Setting values will trigger cancellation
of any ongoing queries; this method blocks until
those queries have been cancelled.
",
fn_name = fn_name
);
let remove_fn_docs = format!(
"
Remove the value from the `{fn_name}` input.
See `{fn_name}` for details. Panics if a value has
not previously been set using `set_{fn_name}` or
`set_{fn_name}_with_durability`.
*Note:* Setting values will trigger cancellation
of any ongoing queries; this method blocks until
those queries have been cancelled.
",
fn_name = fn_name
);
query_fn_declarations.extend(quote! {
# [doc = #set_fn_docs]
fn #set_fn_name(&mut self, #(#key_names: #keys,)* value__: #value);
# [doc = #set_constant_fn_docs]
fn #set_with_durability_fn_name(&mut self, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability);
# [doc = #remove_fn_docs]
fn #remove_fn_name(&mut self, #(#key_names: #keys,)*) -> #value;
});
query_fn_definitions.extend(quote! {
fn #set_fn_name(&mut self, #(#key_names: #keys,)* value__: #value) {
fn __shim(db: &mut dyn #trait_name, #(#key_names: #keys,)* value__: #value) {
salsa::plumbing::get_query_table_mut::<#qt>(db).set((#(#key_names),*), value__)
}
__shim(self, #(#key_names,)* value__)
}
fn #set_with_durability_fn_name(&mut self, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability) {
fn __shim(db: &mut dyn #trait_name, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability) {
salsa::plumbing::get_query_table_mut::<#qt>(db).set_with_durability((#(#key_names),*), value__, durability__)
}
__shim(self, #(#key_names,)* value__ ,durability__)
}
fn #remove_fn_name(&mut self, #(#key_names: #keys,)*) -> #value {
fn __shim(db: &mut dyn #trait_name, #(#key_names: #keys,)*) -> #value {
salsa::plumbing::get_query_table_mut::<#qt>(db).remove((#(#key_names),*))
}
__shim(self, #(#key_names,)*)
}
});
}
// A field for the storage struct
storage_fields.extend(quote! {
#fn_name: std::sync::Arc<<#qt as salsa::Query>::Storage>,
});
}
// Emit the trait itself.
let mut output = {
let bounds = &input.supertraits;
quote! {
#(#trait_attrs)*
#trait_vis trait #trait_name :
salsa::Database +
salsa::plumbing::HasQueryGroup<#group_struct> +
#bounds
{
#query_fn_declarations
}
}
};
// Emit the query group struct and impl of `QueryGroup`.
output.extend(quote! {
/// Representative struct for the query group.
#trait_vis struct #group_struct { }
impl salsa::plumbing::QueryGroup for #group_struct
{
type DynDb = #dyn_db;
type GroupStorage = #group_storage;
}
});
// Emit an impl of the trait
output.extend({
let bounds = input.supertraits;
quote! {
impl<DB> #trait_name for DB
where
DB: #bounds,
DB: salsa::Database,
DB: salsa::plumbing::HasQueryGroup<#group_struct>,
{
#query_fn_definitions
}
}
});
let non_transparent_queries = || {
queries
.iter()
.filter(|q| !matches!(q.storage, QueryStorage::Transparent))
};
// Emit the query types.
for (query, query_index) in non_transparent_queries().zip(0_u16..) {
let fn_name = &query.fn_name;
let qt = &query.query_type;
let storage = match &query.storage {
QueryStorage::Memoized => quote!(salsa::plumbing::MemoizedStorage<Self>),
QueryStorage::Dependencies => {
quote!(salsa::plumbing::DependencyStorage<Self>)
}
QueryStorage::Input => quote!(salsa::plumbing::InputStorage<Self>),
QueryStorage::Interned => quote!(salsa::plumbing::InternedStorage<Self>),
QueryStorage::InternedLookup { intern_query_type } => {
quote!(salsa::plumbing::LookupInternedStorage<Self, #intern_query_type>)
}
QueryStorage::Transparent => panic!("should have been filtered"),
};
let keys = query.keys.iter().map(|(_, ty)| ty);
let value = &query.value;
let query_name = &query.query_name;
// Emit the query struct and implement the Query trait on it.
output.extend(quote! {
#[derive(Default, Debug)]
#trait_vis struct #qt;
});
output.extend(quote! {
impl #qt {
/// Get access to extra methods pertaining to this query.
/// You can also use it to invoke this query.
#trait_vis fn in_db(self, db: &#dyn_db) -> salsa::QueryTable<'_, Self>
{
salsa::plumbing::get_query_table::<#qt>(db)
}
}
});
output.extend(quote! {
impl #qt {
/// Like `in_db`, but gives access to methods for setting the
/// value of an input. Not applicable to derived queries.
///
/// # Threads, cancellation, and blocking
///
/// Mutating the value of a query cannot be done while there are
/// still other queries executing. If you are using your database
/// within a single thread, this is not a problem: you only have
/// `&self` access to the database, but this method requires `&mut
/// self`.
///
/// However, if you have used `snapshot` to create other threads,
/// then attempts to `set` will **block the current thread** until
/// those snapshots are dropped (usually when those threads
/// complete). This also implies that if you create a snapshot but
/// do not send it to another thread, then invoking `set` will
/// deadlock.
///
/// Before blocking, the thread that is attempting to `set` will
/// also set a cancellation flag. This will cause any query
/// invocations in other threads to unwind with a `Cancelled`
/// sentinel value and eventually let the `set` succeed once all
/// threads have unwound past the salsa invocation.
///
/// If your query implementations are performing expensive
/// operations without invoking another query, you can also use
/// the `Runtime::unwind_if_cancelled` method to check for an
/// ongoing cancellation and bring those operations to a close,
/// thus allowing the `set` to succeed. Otherwise, long-running
/// computations may lead to "starvation", meaning that the
/// thread attempting to `set` has to wait a long, long time. =)
#trait_vis fn in_db_mut(self, db: &mut #dyn_db) -> salsa::QueryTableMut<'_, Self>
{
salsa::plumbing::get_query_table_mut::<#qt>(db)
}
}
impl<'d> salsa::QueryDb<'d> for #qt
{
type DynDb = #dyn_db + 'd;
type Group = #group_struct;
type GroupStorage = #group_storage;
}
// ANCHOR:Query_impl
impl salsa::Query for #qt
{
type Key = (#(#keys),*);
type Value = #value;
type Storage = #storage;
const QUERY_INDEX: u16 = #query_index;
const QUERY_NAME: &'static str = #query_name;
fn query_storage<'a>(
group_storage: &'a <Self as salsa::QueryDb<'_>>::GroupStorage,
) -> &'a std::sync::Arc<Self::Storage> {
&group_storage.#fn_name
}
fn query_storage_mut<'a>(
group_storage: &'a <Self as salsa::QueryDb<'_>>::GroupStorage,
) -> &'a std::sync::Arc<Self::Storage> {
&group_storage.#fn_name
}
}
// ANCHOR_END:Query_impl
});
// Implement the QueryFunction trait for queries which need it.
if query.storage.needs_query_function() {
let span = query.fn_name.span();
let key_names: Vec<_> = query.keys.iter().map(|(pat, _)| pat).collect();
let key_pattern = if query.keys.len() == 1 {
quote! { #(#key_names),* }
} else {
quote! { (#(#key_names),*) }
};
let invoke = query.invoke_tt();
let recover = if let Some(cycle_recovery_fn) = &query.cycle {
quote! {
const CYCLE_STRATEGY: salsa::plumbing::CycleRecoveryStrategy =
salsa::plumbing::CycleRecoveryStrategy::Fallback;
fn cycle_fallback(db: &<Self as salsa::QueryDb<'_>>::DynDb, cycle: &salsa::Cycle, #key_pattern: &<Self as salsa::Query>::Key)
-> <Self as salsa::Query>::Value {
#cycle_recovery_fn(
db,
cycle,
#(#key_names),*
)
}
}
} else {
quote! {
const CYCLE_STRATEGY: salsa::plumbing::CycleRecoveryStrategy =
salsa::plumbing::CycleRecoveryStrategy::Panic;
}
};
output.extend(quote_spanned! {span=>
// ANCHOR:QueryFunction_impl
impl salsa::plumbing::QueryFunction for #qt
{
fn execute(db: &<Self as salsa::QueryDb<'_>>::DynDb, #key_pattern: <Self as salsa::Query>::Key)
-> <Self as salsa::Query>::Value {
#invoke(db, #(#key_names),*)
}
#recover
}
// ANCHOR_END:QueryFunction_impl
});
}
}
let mut fmt_ops = proc_macro2::TokenStream::new();
for (Query { fn_name, .. }, query_index) in non_transparent_queries().zip(0_u16..) {
fmt_ops.extend(quote! {
#query_index => {
salsa::plumbing::QueryStorageOps::fmt_index(
&*self.#fn_name, db, input, fmt,
)
}
});
}
let mut maybe_changed_ops = proc_macro2::TokenStream::new();
for (Query { fn_name, .. }, query_index) in non_transparent_queries().zip(0_u16..) {
maybe_changed_ops.extend(quote! {
#query_index => {
salsa::plumbing::QueryStorageOps::maybe_changed_after(
&*self.#fn_name, db, input, revision
)
}
});
}
let mut cycle_recovery_strategy_ops = proc_macro2::TokenStream::new();
for (Query { fn_name, .. }, query_index) in non_transparent_queries().zip(0_u16..) {
cycle_recovery_strategy_ops.extend(quote! {
#query_index => {
salsa::plumbing::QueryStorageOps::cycle_recovery_strategy(
&*self.#fn_name
)
}
});
}
let mut for_each_ops = proc_macro2::TokenStream::new();
for Query { fn_name, .. } in non_transparent_queries() {
for_each_ops.extend(quote! {
op(&*self.#fn_name);
});
}
// Emit query group storage struct
output.extend(quote! {
#trait_vis struct #group_storage {
#storage_fields
}
// ANCHOR:group_storage_new
impl #group_storage {
#trait_vis fn new(group_index: u16) -> Self {
#group_storage {
#(
#queries_with_storage:
std::sync::Arc::new(salsa::plumbing::QueryStorageOps::new(group_index)),
)*
}
}
}
// ANCHOR_END:group_storage_new
// ANCHOR:group_storage_methods
impl #group_storage {
#trait_vis fn fmt_index(
&self,
db: &(#dyn_db + '_),
input: salsa::DatabaseKeyIndex,
fmt: &mut std::fmt::Formatter<'_>,
) -> std::fmt::Result {
match input.query_index() {
#fmt_ops
i => panic!("salsa: impossible query index {}", i),
}
}
#trait_vis fn maybe_changed_after(
&self,
db: &(#dyn_db + '_),
input: salsa::DatabaseKeyIndex,
revision: salsa::Revision,
) -> bool {
match input.query_index() {
#maybe_changed_ops
i => panic!("salsa: impossible query index {}", i),
}
}
#trait_vis fn cycle_recovery_strategy(
&self,
db: &(#dyn_db + '_),
input: salsa::DatabaseKeyIndex,
) -> salsa::plumbing::CycleRecoveryStrategy {
match input.query_index() {
#cycle_recovery_strategy_ops
i => panic!("salsa: impossible query index {}", i),
}
}
#trait_vis fn for_each_query(
&self,
_runtime: &salsa::Runtime,
mut op: &mut dyn FnMut(&dyn salsa::plumbing::QueryStorageMassOps),
) {
#for_each_ops
}
}
// ANCHOR_END:group_storage_methods
});
if std::env::var("SALSA_DUMP").is_ok() {
println!("~~~ query_group");
println!("{}", output);
println!("~~~ query_group");
}
output.into()
}
struct SalsaAttr {
name: String,
tts: TokenStream,
span: Span,
}
impl std::fmt::Debug for SalsaAttr {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(fmt, "{:?}", self.name)
}
}
impl TryFrom<syn::Attribute> for SalsaAttr {
type Error = syn::Attribute;
fn try_from(attr: syn::Attribute) -> Result<SalsaAttr, syn::Attribute> {
if is_not_salsa_attr_path(&attr.path) {
return Err(attr);
}
let span = attr.span();
let name = attr.path.segments[1].ident.to_string();
let tts = attr.tokens.into();
Ok(SalsaAttr { name, tts, span })
}
}
fn is_not_salsa_attr_path(path: &syn::Path) -> bool {
path.segments
.first()
.map(|s| s.ident != "salsa")
.unwrap_or(true)
|| path.segments.len() != 2
}
fn filter_attrs(attrs: Vec<Attribute>) -> (Vec<Attribute>, Vec<SalsaAttr>) {
let mut other = vec![];
let mut salsa = vec![];
// Leave non-salsa attributes untouched. These are
// attributes that don't start with `salsa::` or don't have
// exactly two segments in their path.
// Keep the salsa attributes around.
for attr in attrs {
match SalsaAttr::try_from(attr) {
Ok(it) => salsa.push(it),
Err(it) => other.push(it),
}
}
(other, salsa)
}
#[derive(Debug)]
struct Query {
fn_name: Ident,
receiver: syn::Receiver,
query_name: String,
attrs: Vec<syn::Attribute>,
query_type: Ident,
storage: QueryStorage,
keys: Vec<(Ident, syn::Type)>,
value: syn::Type,
invoke: Option<syn::Path>,
cycle: Option<syn::Path>,
}
impl Query {
fn invoke_tt(&self) -> proc_macro2::TokenStream {
match &self.invoke {
Some(i) => i.into_token_stream(),
None => self.fn_name.clone().into_token_stream(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum QueryStorage {
Memoized,
Dependencies,
Input,
Interned,
InternedLookup { intern_query_type: Ident },
Transparent,
}
impl QueryStorage {
/// Do we need a `QueryFunction` impl for this type of query?
fn needs_query_function(&self) -> bool {
match self {
QueryStorage::Input
| QueryStorage::Interned
| QueryStorage::InternedLookup { .. }
| QueryStorage::Transparent => false,
QueryStorage::Memoized | QueryStorage::Dependencies => true,
}
}
}

View file

@ -1,15 +0,0 @@
[package]
name = "calc"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
derive-new = "0.5.9"
salsa = { path = "../../components/salsa-2022", package = "salsa-2022" }
ordered-float = "3.0"
test-log = { version = "0.2.15", features = ["trace"] }
[dev-dependencies]
expect-test = "1.4.0"

View file

@ -1,14 +0,0 @@
[package]
name = "lazy-input"
version = "0.1.0"
edition = "2021"
[dependencies]
crossbeam-channel = "0.5.6"
dashmap = "5.4.0"
eyre = "0.6.8"
notify-debouncer-mini = "0.2.1"
salsa = { path = "../../components/salsa-2022", package = "salsa-2022" }
[dev-dependencies]
expect-test = "1.4.0"

View file

@ -1,72 +0,0 @@
use std::sync::Arc;
use crate::{interner::Interner, values::*};
#[salsa::query_group(CompilerDatabase)]
pub trait Compiler: Interner {
#[salsa::input]
fn input_string(&self) -> Arc<String>;
/// Get the fields.
fn fields(&self, class: Class) -> Arc<Vec<Field>>;
/// Get the list of all classes
fn all_classes(&self) -> Arc<Vec<Class>>;
/// Get the list of all fields
fn all_fields(&self) -> Arc<Vec<Field>>;
}
/// This function parses a dummy language with the following structure:
///
/// Classes are defined one per line, consisting of a comma-separated list of fields.
///
/// Example:
///
/// ```text
/// lorem,ipsum
/// dolor,sit,amet,
/// consectetur,adipiscing,elit
/// ```
fn all_classes(db: &dyn Compiler) -> Arc<Vec<Class>> {
let string = db.input_string();
let rows = string.split('\n');
let classes: Vec<_> = rows
.filter(|string| !string.is_empty())
.map(|string| {
let fields = string
.trim()
.split(',')
.filter(|string| !string.is_empty())
.map(|name_str| {
let name = name_str.to_owned();
let field_data = FieldData { name };
db.intern_field(field_data)
})
.collect();
let class_data = ClassData { fields };
db.intern_class(class_data)
})
.collect();
Arc::new(classes)
}
fn fields(db: &dyn Compiler, class: Class) -> Arc<Vec<Field>> {
let class = db.lookup_intern_class(class);
Arc::new(class.fields)
}
fn all_fields(db: &dyn Compiler) -> Arc<Vec<Field>> {
Arc::new(
db.all_classes()
.iter()
.cloned()
.flat_map(|class| {
let fields = db.fields(class);
(0..fields.len()).map(move |i| fields[i])
})
.collect(),
)
}

View file

@ -1,23 +0,0 @@
use crate::compiler::CompilerDatabase;
use crate::interner::InternerDatabase;
/// Our "database" will be threaded through our application (though
/// 99% of the application only interacts with it through traits and
/// never knows its real name). It contains all the values for all of
/// our memoized queries and encapsulates **all mutable state that
/// persists longer than a single query execution.**
///
/// Databases can contain whatever you want them to, but salsa
/// requires you to add a `salsa::Storage` member. Note, though,
/// that you should be very careful about adding shared, mutable state
/// to your context (e.g., a shared counter or some such thing). If
/// mutations to that shared state affect the results of your queries,
/// that's going to mess up the incremental results.
#[salsa::database(InternerDatabase, CompilerDatabase)]
#[derive(Default)]
pub struct DatabaseImpl {
storage: salsa::Storage<DatabaseImpl>,
}
/// This (empty) impl makes `DatabaseImpl` a salsa database; the default
/// trait methods suffice for this example.
impl salsa::Database for DatabaseImpl {}

View file

@ -1,10 +0,0 @@
use crate::values::*;
#[salsa::query_group(InternerDatabase)]
pub trait Interner {
#[salsa::interned]
fn intern_field(&self, field: FieldData) -> Field;
#[salsa::interned]
fn intern_class(&self, class: ClassData) -> Class;
}

View file

@ -1,40 +0,0 @@
use std::sync::Arc;
mod compiler;
mod implementation;
mod interner;
mod values;
use self::compiler::Compiler;
use self::implementation::DatabaseImpl;
use self::interner::Interner;
static INPUT_STR: &str = r#"
lorem,ipsum
dolor,sit,amet,
consectetur,adipiscing,elit
"#;
#[test]
fn test() {
let mut db = DatabaseImpl::default();
db.set_input_string(Arc::new(INPUT_STR.to_owned()));
let all_fields = db.all_fields();
assert_eq!(
format!("{:?}", all_fields),
"[Field(0), Field(1), Field(2), Field(3), Field(4), Field(5), Field(6), Field(7)]"
);
}
fn main() {
let mut db = DatabaseImpl::default();
db.set_input_string(Arc::new(INPUT_STR.to_owned()));
for field in db.all_fields().iter() {
let field_data = db.lookup_intern_field(*field);
println!("{:?} => {:?}", field, field_data);
}
}

View file

@ -1,35 +0,0 @@
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct ClassData {
pub fields: Vec<Field>,
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct Class(salsa::InternId);
impl salsa::InternKey for Class {
fn from_intern_id(id: salsa::InternId) -> Self {
Self(id)
}
fn as_intern_id(&self) -> salsa::InternId {
self.0
}
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct FieldData {
pub name: String,
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct Field(salsa::InternId);
impl salsa::InternKey for Field {
fn from_intern_id(id: salsa::InternId) -> Self {
Self(id)
}
fn as_intern_id(&self) -> salsa::InternId {
self.0
}
}

View file

@ -1,104 +0,0 @@
use std::sync::Arc;
///////////////////////////////////////////////////////////////////////////
// Step 1. Define the query group
// A **query group** is a collection of queries (both inputs and
// functions) that are defined in one particular spot. Each query
// group is defined by a trait decorated with the
// `#[salsa::query_group]` attribute. The trait defines one method per
// query, with the arguments to the method being the query **keys** and
// the return value being the query's **value**.
//
// Along with the trait, each query group has an associated
// "storage struct". The name of this struct is specified in the `query_group`
// attribute -- for a query group `Foo`, it is conventionally `FooStorage`.
//
// When we define the final database (see below), we will list out the
// storage structs for each query group that it contains. The database
// will then automatically implement the traits.
//
// Note that one query group can "include" another by listing the
// trait for that query group as a supertrait.
// ANCHOR:trait
#[salsa::query_group(HelloWorldStorage)]
trait HelloWorld {
// For each query, we give the name, some input keys (here, we
// have one key, `()`) and the output type `Arc<String>`. We can
// use attributes to give other configuration:
//
// - `salsa::input` indicates that this is an "input" to the system,
// which must be explicitly set. The `salsa::query_group` macro
// will autogenerate a `set_input_string` method that can be
// used to set the input.
#[salsa::input]
fn input_string(&self, key: ()) -> Arc<String>;
// This is a *derived query*, meaning its value is specified by
// a function (see Step 2, below).
fn length(&self, key: ()) -> usize;
}
// ANCHOR_END:trait
///////////////////////////////////////////////////////////////////////////
// Step 2. Define the queries.
// Define the **function** for the `length` query. This function will
// be called whenever the query's value must be recomputed. After it
// is called once, its result is typically memoized, unless we think
// that one of the inputs may have changed. Its first argument (`db`)
// is the "database". This is always a `&dyn` version of the query group
// trait, so that you can invoke all the queries you know about.
// We never know the concrete type here, as the full database may contain
// methods from other query groups that we don't know about.
fn length(db: &dyn HelloWorld, (): ()) -> usize {
// Read the input string:
let input_string = db.input_string(());
// Return its length:
input_string.len()
}
///////////////////////////////////////////////////////////////////////////
// Step 3. Define the database struct
// Define the actual database struct. This struct needs to be annotated with
// `#[salsa::database(..)]`. The list `..` will be the paths leading to the
// storage structs for each query group that this database supports. This
// attribute macro will generate the necessary impls so that the database
// implements the corresponding traits as well (so, here, `DatabaseStruct` will
// implement the `HelloWorld` trait).
//
// The database struct must have a field `storage: salsa::Storage<Self>`, but it
// can have any number of additional fields beyond that. The
// `#[salsa::database]` macro will generate glue code that accesses this
// `storage` field (but other fields are ignored). The `Storage<Self>` type
// contains all the actual hashtables and the like used to store query results
// and dependency information.
//
// In addition to including the `storage` field, you must also implement the
// `salsa::Database` trait (as shown below). This gives you a chance to define
// the callback methods within if you want to (in this example, we don't).
// ANCHOR:database
#[salsa::database(HelloWorldStorage)]
#[derive(Default)]
struct DatabaseStruct {
storage: salsa::Storage<Self>,
}
impl salsa::Database for DatabaseStruct {}
// ANCHOR_END:database
// This shows how to use a query.
fn main() {
let mut db = DatabaseStruct::default();
// You cannot access input_string yet, because it does not have a
// value. If you do, it will panic. You could create an Option
// interface by maintaining a HashSet of inserted keys.
// println!("Initially, the length is {}.", db.length(()));
db.set_input_string((), Arc::new("Hello, world".to_string()));
println!("Now, the length is {}.", db.length(()));
}

View file

@ -1,6 +1,6 @@
use std::{path::PathBuf, sync::Mutex, time::Duration};
use crossbeam_channel::{unbounded, Sender};
use crossbeam::channel::{unbounded, Sender};
use dashmap::{mapref::entry::Entry, DashMap};
use eyre::{eyre, Context, Report, Result};
use notify_debouncer_mini::{

View file

@ -1,36 +0,0 @@
/// Sources for the [selection pattern chapter][c] of the salsa book.
///
/// [c]: https://salsa-rs.github.io/salsa/common_patterns/selection.html
// ANCHOR: request
#[derive(Clone, Debug, PartialEq, Eq)]
struct ParsedResult {
header: Vec<ParsedHeader>,
body: String,
}
#[derive(Clone, Debug, PartialEq, Eq)]
struct ParsedHeader {
key: String,
value: String,
}
#[salsa::query_group(Request)]
trait RequestParser {
/// The base text of the request.
#[salsa::input]
fn request_text(&self) -> String;
/// The parsed form of the request.
fn parse(&self) -> ParsedResult;
}
// ANCHOR_END: request
fn parse(_db: &dyn RequestParser) -> ParsedResult {
panic!()
}
mod util1;
mod util2;
fn main() {}

View file

@ -1,16 +0,0 @@
use super::*;
// ANCHOR: util1
#[salsa::query_group(Request)]
trait RequestUtil: RequestParser {
fn content_type(&self) -> Option<String>;
}
fn content_type(db: &dyn RequestUtil) -> Option<String> {
db.parse()
.header
.iter()
.find(|header| header.key == "content-type")
.map(|header| header.value.clone())
}
// ANCHOR_END: util1

View file

@ -1,20 +0,0 @@
use super::*;
// ANCHOR: util2
#[salsa::query_group(Request)]
trait RequestUtil: RequestParser {
fn header(&self) -> Vec<ParsedHeader>;
fn content_type(&self) -> Option<String>;
}
fn header(db: &dyn RequestUtil) -> Vec<ParsedHeader> {
db.parse().header
}
fn content_type(db: &dyn RequestUtil) -> Option<String> {
db.header()
.iter()
.find(|header| header.key == "content-type")
.map(|header| header.value.clone())
}
// ANCHOR_END: util2

View file

@ -1,15 +0,0 @@
[package]
name = "salsa-2022-tests"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
salsa = { path = "../components/salsa-2022", package = "salsa-2022" }
expect-test = "1.4.0"
parking_lot = "0.12.1"
test-log = "0.2.11"
env_logger = "*"
trybuild = "1.0"
rustversion = "1.0"

View file

@ -1,43 +0,0 @@
error: `singleton` option not allowed here
--> tests/compile-fail/singleton_only_for_input.rs:20:18
|
20 | #[salsa::tracked(singleton)]
| ^^^^^^^^^
error: `singleton` option not allowed here
--> tests/compile-fail/singleton_only_for_input.rs:26:18
|
26 | #[salsa::tracked(singleton)]
| ^^^^^^^^^
error: `singleton` option not allowed here
--> tests/compile-fail/singleton_only_for_input.rs:33:22
|
33 | #[salsa::accumulator(singleton)]
| ^^^^^^^^^
error[E0412]: cannot find type `MyTracked` in this scope
--> tests/compile-fail/singleton_only_for_input.rs:10:21
|
10 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs );
| ^^^^^^^^^ not found in this scope
error[E0412]: cannot find type `Integers` in this scope
--> tests/compile-fail/singleton_only_for_input.rs:10:32
|
10 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs );
| ^^^^^^^^ not found in this scope
error[E0412]: cannot find type `create_tracked_structs` in this scope
--> tests/compile-fail/singleton_only_for_input.rs:10:42
|
10 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs );
| ^^^^^^^^^^^^^^^^^^^^^^ not found in this scope
warning: unused import: `test_log::test`
--> tests/compile-fail/singleton_only_for_input.rs:7:5
|
7 | use test_log::test;
| ^^^^^^^^^^^^^^
|
= note: `#[warn(unused_imports)]` on by default

View file

@ -1,459 +0,0 @@
#![allow(warnings)]
use std::panic::{RefUnwindSafe, UnwindSafe};
use expect_test::expect;
use salsa::Durability;
// Axes:
//
// Threading
// * Intra-thread
// * Cross-thread -- part of cycle is on one thread, part on another
//
// Recovery strategies:
// * Panic
// * Fallback
// * Mixed -- multiple strategies within cycle participants
//
// Across revisions:
// * N/A -- only one revision
// * Present in new revision, not old
// * Present in old revision, not new
// * Present in both revisions
//
// Dependencies
// * Tracked
// * Untracked -- cycle participant(s) contain untracked reads
//
// Layers
// * Direct -- cycle participant is directly invoked from test
// * Indirect -- invoked a query that invokes the cycle
//
//
// | Thread | Recovery | Old, New | Dep style | Layers   | Test Name                               |
// | ------ | -------- | -------- | --------- | -------- | --------------------------------------- |
// | Intra  | Panic    | N/A      | Tracked   | direct   | cycle_memoized                          |
// | Intra  | Panic    | N/A      | Untracked | direct   | cycle_volatile                          |
// | Intra  | Fallback | N/A      | Tracked   | direct   | cycle_cycle                             |
// | Intra  | Fallback | N/A      | Tracked   | indirect | inner_cycle                             |
// | Intra  | Fallback | Both     | Tracked   | direct   | cycle_revalidate                        |
// | Intra  | Fallback | New      | Tracked   | direct   | cycle_appears                           |
// | Intra  | Fallback | Old      | Tracked   | direct   | cycle_disappears                        |
// | Intra  | Fallback | Old      | Tracked   | direct   | cycle_disappears_durability             |
// | Intra  | Mixed    | N/A      | Tracked   | direct   | cycle_mixed_1                           |
// | Intra  | Mixed    | N/A      | Tracked   | direct   | cycle_mixed_2                           |
// | Cross  | Panic    | N/A      | Tracked   | both     | parallel/parallel_cycle_none_recover.rs |
// | Cross  | Fallback | N/A      | Tracked   | both     | parallel/parallel_cycle_one_recover.rs  |
// | Cross  | Fallback | N/A      | Tracked   | both     | parallel/parallel_cycle_mid_recover.rs  |
// | Cross  | Fallback | N/A      | Tracked   | both     | parallel/parallel_cycle_all_recover.rs  |
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
struct Error {
cycle: Vec<String>,
}
#[salsa::jar(db = Db)]
struct Jar(
MyInput,
memoized_a,
memoized_b,
volatile_a,
volatile_b,
ABC,
cycle_a,
cycle_b,
cycle_c,
);
trait Db: salsa::DbWithJar<Jar> {}
#[salsa::db(Jar)]
#[derive(Default)]
struct Database {
storage: salsa::Storage<Self>,
}
impl Db for Database {}
impl salsa::Database for Database {}
impl RefUnwindSafe for Database {}
#[salsa::input(jar = Jar)]
struct MyInput {}
#[salsa::tracked(jar = Jar)]
fn memoized_a(db: &dyn Db, input: MyInput) {
memoized_b(db, input)
}
#[salsa::tracked(jar = Jar)]
fn memoized_b(db: &dyn Db, input: MyInput) {
memoized_a(db, input)
}
#[salsa::tracked(jar = Jar)]
fn volatile_a(db: &dyn Db, input: MyInput) {
db.report_untracked_read();
volatile_b(db, input)
}
#[salsa::tracked(jar = Jar)]
fn volatile_b(db: &dyn Db, input: MyInput) {
db.report_untracked_read();
volatile_a(db, input)
}
/// The queries A, B, and C in `Database` can be configured
/// to invoke one another in arbitrary ways using this
/// enum.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum CycleQuery {
None,
A,
B,
C,
AthenC,
}
#[salsa::input(jar = Jar)]
struct ABC {
a: CycleQuery,
b: CycleQuery,
c: CycleQuery,
}
impl CycleQuery {
fn invoke(self, db: &dyn Db, abc: ABC) -> Result<(), Error> {
match self {
CycleQuery::A => cycle_a(db, abc),
CycleQuery::B => cycle_b(db, abc),
CycleQuery::C => cycle_c(db, abc),
CycleQuery::AthenC => {
let _ = cycle_a(db, abc);
cycle_c(db, abc)
}
CycleQuery::None => Ok(()),
}
}
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_a)]
fn cycle_a(db: &dyn Db, abc: ABC) -> Result<(), Error> {
abc.a(db).invoke(db, abc)
}
fn recover_a(db: &dyn Db, cycle: &salsa::Cycle, abc: ABC) -> Result<(), Error> {
Err(Error {
cycle: cycle.all_participants(db),
})
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_b)]
fn cycle_b(db: &dyn Db, abc: ABC) -> Result<(), Error> {
abc.b(db).invoke(db, abc)
}
fn recover_b(db: &dyn Db, cycle: &salsa::Cycle, abc: ABC) -> Result<(), Error> {
Err(Error {
cycle: cycle.all_participants(db),
})
}
#[salsa::tracked(jar = Jar)]
fn cycle_c(db: &dyn Db, abc: ABC) -> Result<(), Error> {
abc.c(db).invoke(db, abc)
}
#[track_caller]
fn extract_cycle(f: impl FnOnce() + UnwindSafe) -> salsa::Cycle {
let v = std::panic::catch_unwind(f);
if let Err(d) = &v {
if let Some(cycle) = d.downcast_ref::<salsa::Cycle>() {
return cycle.clone();
}
}
panic!("unexpected value: {:?}", v)
}
#[test]
fn cycle_memoized() {
let mut db = Database::default();
let input = MyInput::new(&db);
let cycle = extract_cycle(|| memoized_a(&db, input));
let expected = expect![[r#"
[
"memoized_a(0)",
"memoized_b(0)",
]
"#]];
expected.assert_debug_eq(&cycle.all_participants(&db));
}
#[test]
fn cycle_volatile() {
let mut db = Database::default();
let input = MyInput::new(&db);
let cycle = extract_cycle(|| volatile_a(&db, input));
let expected = expect![[r#"
[
"volatile_a(0)",
"volatile_b(0)",
]
"#]];
expected.assert_debug_eq(&cycle.all_participants(&db));
}
#[test]
fn expect_cycle() {
// A --> B
// ^ |
// +-----+
let mut db = Database::default();
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::A, CycleQuery::None);
assert!(cycle_a(&db, abc).is_err());
}
#[test]
fn inner_cycle() {
// A --> B <-- C
// ^ |
// +-----+
let mut db = Database::default();
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::A, CycleQuery::B);
let err = cycle_c(&db, abc);
assert!(err.is_err());
let expected = expect![[r#"
[
"cycle_a(0)",
"cycle_b(0)",
]
"#]];
expected.assert_debug_eq(&err.unwrap_err().cycle);
}
#[test]
fn cycle_revalidate() {
// A --> B
// ^ |
// +-----+
let mut db = Database::default();
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::A, CycleQuery::None);
assert!(cycle_a(&db, abc).is_err());
abc.set_b(&mut db).to(CycleQuery::A); // same value as default
assert!(cycle_a(&db, abc).is_err());
}
#[test]
fn cycle_recovery_unchanged_twice() {
// A --> B
// ^ |
// +-----+
let mut db = Database::default();
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::A, CycleQuery::None);
assert!(cycle_a(&db, abc).is_err());
abc.set_c(&mut db).to(CycleQuery::A); // force new revision
assert!(cycle_a(&db, abc).is_err());
}
#[test]
fn cycle_appears() {
let mut db = Database::default();
// A --> B
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::None, CycleQuery::None);
assert!(cycle_a(&db, abc).is_ok());
// A --> B
// ^ |
// +-----+
abc.set_b(&mut db).to(CycleQuery::A);
assert!(cycle_a(&db, abc).is_err());
}
#[test]
fn cycle_disappears() {
let mut db = Database::default();
// A --> B
// ^ |
// +-----+
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::A, CycleQuery::None);
assert!(cycle_a(&db, abc).is_err());
// A --> B
abc.set_b(&mut db).to(CycleQuery::None);
assert!(cycle_a(&db, abc).is_ok());
}
/// A variant on `cycle_disappears` in which the values of
/// `a` and `b` are set with durability values.
/// If we are not careful, this could cause us to overlook
/// the fact that the cycle will no longer occur.
#[test]
fn cycle_disappears_durability() {
let mut db = Database::default();
let abc = ABC::new(
&mut db,
CycleQuery::None,
CycleQuery::None,
CycleQuery::None,
);
abc.set_a(&mut db)
.with_durability(Durability::LOW)
.to(CycleQuery::B);
abc.set_b(&mut db)
.with_durability(Durability::HIGH)
.to(CycleQuery::A);
assert!(cycle_a(&db, abc).is_err());
// At this point, `a` read `LOW` input, and `b` read `HIGH` input. However,
// because `b` participates in the same cycle as `a`, its final durability
// should be `LOW`.
//
// Check that setting a `LOW` input causes us to re-execute `b` query, and
// observe that the cycle goes away.
abc.set_a(&mut db)
.with_durability(Durability::LOW)
.to(CycleQuery::None);
assert!(cycle_b(&mut db, abc).is_ok());
}
#[test]
fn cycle_mixed_1() {
let mut db = Database::default();
// A --> B <-- C
// | ^
// +-----+
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::C, CycleQuery::B);
let expected = expect![[r#"
[
"cycle_b(0)",
"cycle_c(0)",
]
"#]];
expected.assert_debug_eq(&cycle_c(&db, abc).unwrap_err().cycle);
}
#[test]
fn cycle_mixed_2() {
let mut db = Database::default();
// Configuration:
//
// A --> B --> C
// ^ |
// +-----------+
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::C, CycleQuery::A);
let expected = expect![[r#"
[
"cycle_a(0)",
"cycle_b(0)",
"cycle_c(0)",
]
"#]];
expected.assert_debug_eq(&cycle_a(&db, abc).unwrap_err().cycle);
}
#[test]
fn cycle_deterministic_order() {
// No matter whether we start from A or B, we get the same set of participants:
let f = || {
let mut db = Database::default();
// A --> B
// ^ |
// +-----+
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::A, CycleQuery::None);
(db, abc)
};
let (db, abc) = f();
let a = cycle_a(&db, abc);
let (db, abc) = f();
let b = cycle_b(&db, abc);
let expected = expect![[r#"
(
[
"cycle_a(0)",
"cycle_b(0)",
],
[
"cycle_a(0)",
"cycle_b(0)",
],
)
"#]];
expected.assert_debug_eq(&(a.unwrap_err().cycle, b.unwrap_err().cycle));
}
#[test]
fn cycle_multiple() {
// Whichever of the three queries we invoke first, we get the same set of participants:
let mut db = Database::default();
// Configuration:
//
// A --> B <-- C
// ^ | ^
// +-----+ |
// | |
// +-----+
//
// Here, conceptually, B encounters a cycle with A and then
// recovers.
let abc = ABC::new(&db, CycleQuery::B, CycleQuery::AthenC, CycleQuery::A);
let c = cycle_c(&db, abc);
let b = cycle_b(&db, abc);
let a = cycle_a(&db, abc);
let expected = expect![[r#"
(
[
"cycle_a(0)",
"cycle_b(0)",
],
[
"cycle_a(0)",
"cycle_b(0)",
],
[
"cycle_a(0)",
"cycle_b(0)",
],
)
"#]];
expected.assert_debug_eq(&(
c.unwrap_err().cycle,
b.unwrap_err().cycle,
a.unwrap_err().cycle,
));
}
#[test]
fn cycle_recovery_set_but_not_participating() {
let mut db = Database::default();
// A --> C -+
// ^ |
// +--+
let abc = ABC::new(&db, CycleQuery::C, CycleQuery::None, CycleQuery::C);
// Here we expect C to panic and A not to recover:
let r = extract_cycle(|| drop(cycle_a(&db, abc)));
let expected = expect![[r#"
[
"cycle_c(0)",
]
"#]];
expected.assert_debug_eq(&r.all_participants(&db));
}
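For context: the `ABC` input and the `cycle_a`/`cycle_b`/`cycle_c` queries exercised by these tests are defined earlier in this file, outside the lines shown in this hunk. Below is a minimal sketch of the shape those definitions presumably take, using the same jar-based API as the rest of this diff; the `Error` type, the `invoke` helper, and the recovery bodies are illustrative assumptions, not a copy of the removed code.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum CycleQuery {
    None,
    A,
    B,
    C,
    AthenC, // invoke A, then C
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct Error {
    cycle: Vec<String>, // participant labels such as "cycle_a(0)"
}
#[salsa::input(jar = Jar)]
struct ABC {
    a: CycleQuery,
    b: CycleQuery,
    c: CycleQuery,
}
impl CycleQuery {
    // Follow the edge this variant names (assumed helper).
    fn invoke(self, db: &dyn Db, abc: ABC) -> Result<(), Error> {
        match self {
            CycleQuery::A => cycle_a(db, abc),
            CycleQuery::B => cycle_b(db, abc),
            CycleQuery::C => cycle_c(db, abc),
            CycleQuery::AthenC => cycle_a(db, abc).and_then(|()| cycle_c(db, abc)),
            CycleQuery::None => Ok(()),
        }
    }
}
#[salsa::tracked(jar = Jar, recovery_fn = recover_a)]
fn cycle_a(db: &dyn Db, abc: ABC) -> Result<(), Error> {
    abc.a(db).invoke(db, abc)
}
fn recover_a(db: &dyn Db, cycle: &salsa::Cycle, _abc: ABC) -> Result<(), Error> {
    // On a cycle, report the participants, as the `expect!` blocks above check.
    Err(Error {
        cycle: cycle.all_participants(db),
    })
}
// `cycle_b` and `cycle_c` follow the same pattern for the `b` and `c` fields.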

View file

@ -1,187 +0,0 @@
//! Test that a `tracked` fn with lru options
//! compiles and executes successfully.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use salsa::Database;
use salsa_2022_tests::{HasLogger, Logger};
use test_log::test;
#[salsa::jar(db = Db)]
struct Jar(MyInput, get_hot_potato, get_hot_potato2, get_volatile);
trait Db: salsa::DbWithJar<Jar> + HasLogger {}
#[derive(Debug, PartialEq, Eq)]
struct HotPotato(u32);
thread_local! {
static N_POTATOES: AtomicUsize = const { AtomicUsize::new(0) }
}
impl HotPotato {
fn new(id: u32) -> HotPotato {
N_POTATOES.with(|n| n.fetch_add(1, Ordering::SeqCst));
HotPotato(id)
}
}
impl Drop for HotPotato {
fn drop(&mut self) {
N_POTATOES.with(|n| n.fetch_sub(1, Ordering::SeqCst));
}
}
#[salsa::input(jar = Jar)]
struct MyInput {
field: u32,
}
#[salsa::tracked(jar = Jar, lru = 32)]
fn get_hot_potato(db: &dyn Db, input: MyInput) -> Arc<HotPotato> {
db.push_log(format!("get_hot_potato({:?})", input.field(db)));
Arc::new(HotPotato::new(input.field(db)))
}
#[salsa::tracked(jar = Jar)]
fn get_hot_potato2(db: &dyn Db, input: MyInput) -> u32 {
db.push_log(format!("get_hot_potato2({:?})", input.field(db)));
get_hot_potato(db, input).0
}
#[salsa::tracked(jar = Jar, lru = 32)]
fn get_volatile(db: &dyn Db, _input: MyInput) -> usize {
static COUNTER: AtomicUsize = AtomicUsize::new(0);
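// `report_untracked_read` marks this query as having read untracked state,
// so its result must be treated as potentially changed at every new revision.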
db.report_untracked_read();
COUNTER.fetch_add(1, Ordering::SeqCst)
}
#[salsa::db(Jar)]
#[derive(Default)]
struct DatabaseImpl {
storage: salsa::Storage<Self>,
logger: Logger,
}
impl salsa::Database for DatabaseImpl {}
impl Db for DatabaseImpl {}
impl HasLogger for DatabaseImpl {
fn logger(&self) -> &Logger {
&self.logger
}
}
fn load_n_potatoes() -> usize {
N_POTATOES.with(|n| n.load(Ordering::SeqCst))
}
#[test]
fn lru_works() {
let db = DatabaseImpl::default();
assert_eq!(load_n_potatoes(), 0);
for i in 0..128u32 {
let input = MyInput::new(&db, i);
let p = get_hot_potato(&db, input);
assert_eq!(p.0, i)
}
// Create a new input to change the revision, and trigger the GC
MyInput::new(&db, 0);
assert_eq!(load_n_potatoes(), 32);
}
#[test]
fn lru_doesnt_break_volatile_queries() {
let db = DatabaseImpl::default();
// Create all inputs first, so that there are no revision changes among calls to `get_volatile`
let inputs: Vec<MyInput> = (0..128usize).map(|i| MyInput::new(&db, i as u32)).collect();
// Here, we check that we execute each volatile query at most once, despite
// LRU. That does mean that we have more values in DB than the LRU capacity,
// but it's much better than inconsistent results from volatile queries!
for _ in 0..3 {
for (i, input) in inputs.iter().enumerate() {
let x = get_volatile(&db, *input);
assert_eq!(x, i);
}
}
}
#[test]
fn lru_can_be_changed_at_runtime() {
let db = DatabaseImpl::default();
assert_eq!(load_n_potatoes(), 0);
let inputs: Vec<(u32, MyInput)> = (0..128).map(|i| (i, MyInput::new(&db, i))).collect();
for &(i, input) in inputs.iter() {
let p = get_hot_potato(&db, input);
assert_eq!(p.0, i)
}
// Create a new input to change the revision, and trigger the GC
MyInput::new(&db, 0);
assert_eq!(load_n_potatoes(), 32);
get_hot_potato::set_lru_capacity(&db, 64);
assert_eq!(load_n_potatoes(), 32);
for &(i, input) in inputs.iter() {
let p = get_hot_potato(&db, input);
assert_eq!(p.0, i)
}
// Create a new input to change the revision, and trigger the GC
MyInput::new(&db, 0);
assert_eq!(load_n_potatoes(), 64);
// Special case: setting capacity to zero disables LRU
get_hot_potato::set_lru_capacity(&db, 0);
assert_eq!(load_n_potatoes(), 64);
for &(i, input) in inputs.iter() {
let p = get_hot_potato(&db, input);
assert_eq!(p.0, i)
}
// Create a new input to change the revision, and trigger the GC
MyInput::new(&db, 0);
assert_eq!(load_n_potatoes(), 128);
drop(db);
assert_eq!(load_n_potatoes(), 0);
}
#[test]
fn lru_keeps_dependency_info() {
let mut db = DatabaseImpl::default();
let capacity = 32;
// Invoke `get_hot_potato2` 33 times. This will (in turn) invoke
// `get_hot_potato`, which will trigger LRU after 32 executions.
let inputs: Vec<MyInput> = (0..(capacity + 1))
.map(|i| MyInput::new(&db, i as u32))
.collect();
for (i, input) in inputs.iter().enumerate() {
let x = get_hot_potato2(&db, *input);
assert_eq!(x as usize, i);
}
db.synthetic_write(salsa::Durability::HIGH);
// We want to test that calls to `get_hot_potato2` are still considered
// clean. Check that no new executions occur as we go here.
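// ((capacity + 1) inputs were created above, and each executed both
// `get_hot_potato2` and `get_hot_potato` once: (32 + 1) * 2 = 66 entries.)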
db.assert_logs_len((capacity + 1) * 2);
// Calling `get_hot_potato2(0)` has to check that `get_hot_potato(0)` is still valid;
// even though we've evicted its memoized value (LRU), we find that it is still good.
let p = get_hot_potato2(&db, *inputs.first().unwrap());
assert_eq!(p, 0);
db.assert_logs_len(0);
}

View file

@ -1,7 +0,0 @@
mod setup;
mod parallel_cycle_all_recover;
mod parallel_cycle_mid_recover;
mod parallel_cycle_none_recover;
mod parallel_cycle_one_recover;
mod signal;

View file

@ -1,112 +0,0 @@
//! Test for cycle recovery spread across two threads.
//! See `../cycles.rs` for a complete listing of cycle tests,
//! both intra- and cross-thread.
use crate::setup::Database;
use crate::setup::Knobs;
use salsa::ParallelDatabase;
pub(crate) trait Db: salsa::DbWithJar<Jar> + Knobs {}
impl<T: salsa::DbWithJar<Jar> + Knobs> Db for T {}
#[salsa::jar(db = Db)]
pub(crate) struct Jar(MyInput, a1, a2, b1, b2);
#[salsa::input(jar = Jar)]
pub(crate) struct MyInput {
field: i32,
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_a1)]
pub(crate) fn a1(db: &dyn Db, input: MyInput) -> i32 {
// Wait to create the cycle until both threads have entered
db.signal(1);
db.wait_for(2);
a2(db, input)
}
fn recover_a1(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
dbg!("recover_a1");
key.field(db) * 10 + 1
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_a2)]
pub(crate) fn a2(db: &dyn Db, input: MyInput) -> i32 {
b1(db, input)
}
fn recover_a2(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
dbg!("recover_a2");
key.field(db) * 10 + 2
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_b1)]
pub(crate) fn b1(db: &dyn Db, input: MyInput) -> i32 {
// Wait to create the cycle until both threads have entered
db.wait_for(1);
db.signal(2);
// Wait for thread A to block on this thread
db.wait_for(3);
b2(db, input)
}
fn recover_b1(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
dbg!("recover_b1");
key.field(db) * 20 + 1
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_b2)]
pub(crate) fn b2(db: &dyn Db, input: MyInput) -> i32 {
a1(db, input)
}
fn recover_b2(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
dbg!("recover_b2");
key.field(db) * 20 + 2
}
// Recover cycle test:
//
// The pattern is as follows.
//
// Thread A Thread B
// -------- --------
// a1 b1
// | wait for stage 1 (blocks)
// signal stage 1 |
// wait for stage 2 (blocks) (unblocked)
// | signal stage 2
// (unblocked) wait for stage 3 (blocks)
// a2 |
// b1 (blocks -> stage 3) |
// | (unblocked)
// | b2
// | a1 (cycle detected, recovers)
// | b2 completes, recovers
// | b1 completes, recovers
// a2 sees cycle, recovers
// a1 completes, recovers
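// Expected values: with `field = 1`, thread A recovers via `recover_a1`
// (1 * 10 + 1 = 11) and thread B via `recover_b1` (1 * 20 + 1 = 21),
// matching the asserts below.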
#[test]
fn execute() {
let db = Database::default();
db.knobs().signal_on_will_block.set(3);
let input = MyInput::new(&db, 1);
let thread_a = std::thread::spawn({
let db = db.snapshot();
move || a1(&*db, input)
});
let thread_b = std::thread::spawn({
let db = db.snapshot();
move || b1(&*db, input)
});
assert_eq!(thread_a.join().unwrap(), 11);
assert_eq!(thread_b.join().unwrap(), 21);
}

View file

@ -1,110 +0,0 @@
//! Test for cycle recovery spread across two threads.
//! See `../cycles.rs` for a complete listing of cycle tests,
//! both intra- and cross-thread.
use crate::setup::Database;
use crate::setup::Knobs;
use salsa::ParallelDatabase;
pub(crate) trait Db: salsa::DbWithJar<Jar> + Knobs {}
impl<T: salsa::DbWithJar<Jar> + Knobs> Db for T {}
#[salsa::jar(db = Db)]
pub(crate) struct Jar(MyInput, a1, a2, b1, b2, b3);
#[salsa::input(jar = Jar)]
pub(crate) struct MyInput {
field: i32,
}
#[salsa::tracked(jar = Jar)]
pub(crate) fn a1(db: &dyn Db, input: MyInput) -> i32 {
// tell thread b we have started
db.signal(1);
// wait for thread b to block on a1
db.wait_for(2);
a2(db, input)
}
#[salsa::tracked(jar = Jar)]
pub(crate) fn a2(db: &dyn Db, input: MyInput) -> i32 {
// create the cycle
b1(db, input)
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_b1)]
pub(crate) fn b1(db: &dyn Db, input: MyInput) -> i32 {
// wait for thread a to have started
db.wait_for(1);
b2(db, input)
}
fn recover_b1(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
dbg!("recover_b1");
key.field(db) * 20 + 2
}
#[salsa::tracked(jar = Jar)]
pub(crate) fn b2(db: &dyn Db, input: MyInput) -> i32 {
// will encounter a cycle but recover
b3(db, input);
b1(db, input); // hasn't recovered yet
0
}
#[salsa::tracked(jar = Jar, recovery_fn=recover_b3)]
pub(crate) fn b3(db: &dyn Db, input: MyInput) -> i32 {
// will block on thread a, signaling stage 2
a1(db, input)
}
fn recover_b3(db: &dyn Db, _cycle: &salsa::Cycle, key: MyInput) -> i32 {
dbg!("recover_b3");
key.field(db) * 200 + 2
}
// Recover cycle test:
//
// The pattern is as follows.
//
// Thread A Thread B
// -------- --------
// a1 b1
// | wait for stage 1 (blocks)
// signal stage 1 |
// wait for stage 2 (blocks) (unblocked)
// | |
// | b2
// | b3
// | a1 (blocks -> stage 2)
// (unblocked) |
// a2 (cycle detected) |
// b3 recovers
// b2 resumes
// b1 recovers
#[test]
fn execute() {
let db = Database::default();
db.knobs().signal_on_will_block.set(3);
let input = MyInput::new(&db, 1);
let thread_a = std::thread::spawn({
let db = db.snapshot();
move || a1(&*db, input)
});
let thread_b = std::thread::spawn({
let db = db.snapshot();
move || b1(&*db, input)
});
// We expect that the recovery function yields
// `1 * 20 + 2`, which is returned (and forwarded)
// to b1, and from there to a2 and a1.
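// (`recover_b3` itself yields 1 * 200 + 2 = 202, but `b2` discards that
// value, so it never appears in the final results.)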
assert_eq!(thread_a.join().unwrap(), 22);
assert_eq!(thread_b.join().unwrap(), 22);
}

View file

@ -1,83 +0,0 @@
//! Test a cross-thread cycle in which no queries recover.
//! See `../cycles.rs` for a complete listing of cycle tests,
//! both intra- and cross-thread.
use crate::setup::Database;
use crate::setup::Knobs;
use expect_test::expect;
use salsa::ParallelDatabase;
pub(crate) trait Db: salsa::DbWithJar<Jar> + Knobs {}
impl<T: salsa::DbWithJar<Jar> + Knobs> Db for T {}
#[salsa::jar(db = Db)]
pub(crate) struct Jar(MyInput, a, b);
#[salsa::input(jar = Jar)]
pub(crate) struct MyInput {
field: i32,
}
#[salsa::tracked(jar = Jar)]
pub(crate) fn a(db: &dyn Db, input: MyInput) -> i32 {
// Wait to create the cycle until both threads have entered
db.signal(1);
db.wait_for(2);
b(db, input)
}
#[salsa::tracked(jar = Jar)]
pub(crate) fn b(db: &dyn Db, input: MyInput) -> i32 {
// Wait to create the cycle until both threads have entered
db.wait_for(1);
db.signal(2);
// Wait for thread A to block on this thread
db.wait_for(3);
// Now try to execute A
a(db, input)
}
#[test]
fn execute() {
let db = Database::default();
db.knobs().signal_on_will_block.set(3);
let input = MyInput::new(&db, -1);
let thread_a = std::thread::spawn({
let db = db.snapshot();
move || a(&*db, input)
});
let thread_b = std::thread::spawn({
let db = db.snapshot();
move || b(&*db, input)
});
// We expect B to panic because it detects a cycle (it is the one that calls A, ultimately).
// It panics with the `salsa::Cycle` itself as the payload, which we downcast below.
let err_b = thread_b.join().unwrap_err();
if let Some(c) = err_b.downcast_ref::<salsa::Cycle>() {
let expected = expect![[r#"
[
"a(0)",
"b(0)",
]
"#]];
expected.assert_debug_eq(&c.all_participants(&db));
} else {
panic!("b failed in an unexpected way: {:?}", err_b);
}
// We expect A to propagate a panic, which causes us to use the sentinel
// type `Cancelled`.
assert!(thread_a
.join()
.unwrap_err()
.downcast_ref::<salsa::Cancelled>()
.is_some());
}

View file

@ -1,69 +0,0 @@
use std::{cell::Cell, sync::Arc};
use crate::signal::Signal;
/// Various "knobs" and utilities used by tests to force
/// a certain behavior.
pub(crate) trait Knobs {
fn knobs(&self) -> &KnobsStruct;
fn signal(&self, stage: usize);
fn wait_for(&self, stage: usize);
}
/// Various "knobs" that can be used to customize how the queries
/// behave on one specific thread. Note that this state is
/// intentionally thread-local (apart from `signal`).
#[derive(Clone, Default)]
pub(crate) struct KnobsStruct {
/// A kind of flexible barrier used to coordinate execution across
/// threads to ensure we reach various weird states.
pub(crate) signal: Arc<Signal>,
/// When this database is about to block, send a signal.
pub(crate) signal_on_will_block: Cell<usize>,
}
#[salsa::db(
crate::parallel_cycle_one_recover::Jar,
crate::parallel_cycle_none_recover::Jar,
crate::parallel_cycle_mid_recover::Jar,
crate::parallel_cycle_all_recover::Jar
)]
#[derive(Default)]
pub(crate) struct Database {
storage: salsa::Storage<Self>,
knobs: KnobsStruct,
}
impl salsa::Database for Database {
fn salsa_event(&self, event: salsa::Event) {
if let salsa::EventKind::WillBlockOn { .. } = event.kind {
self.signal(self.knobs().signal_on_will_block.get());
}
}
}
impl salsa::ParallelDatabase for Database {
fn snapshot(&self) -> salsa::Snapshot<Self> {
salsa::Snapshot::new(Database {
storage: self.storage.snapshot(),
knobs: self.knobs.clone(),
})
}
}
impl Knobs for Database {
fn knobs(&self) -> &KnobsStruct {
&self.knobs
}
fn signal(&self, stage: usize) {
self.knobs.signal.signal(stage);
}
fn wait_for(&self, stage: usize) {
self.knobs.signal.wait_for(stage);
}
}

View file

@ -1,40 +0,0 @@
use parking_lot::{Condvar, Mutex};
#[derive(Default)]
pub(crate) struct Signal {
value: Mutex<usize>,
cond_var: Condvar,
}
impl Signal {
pub(crate) fn signal(&self, stage: usize) {
dbg!(format!("signal({})", stage));
// This check avoids acquiring the lock for signals that will
// clearly be a no-op. It is not *necessary*, but it makes these
// tests more likely to encounter the weird race conditions they
// are hunting for; otherwise the queries under test would tend
// to be unnecessarily synchronous.
if stage > 0 {
let mut v = self.value.lock();
if stage > *v {
*v = stage;
self.cond_var.notify_all();
}
}
}
/// Waits until the given condition is true; the fn is invoked
/// with the current stage.
pub(crate) fn wait_for(&self, stage: usize) {
dbg!(format!("wait_for({})", stage));
// As above, avoid lock if clearly a no-op.
if stage > 0 {
let mut v = self.value.lock();
while *v < stage {
self.cond_var.wait(&mut v);
}
}
}
}
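As a usage illustration (this snippet is not part of any test in this diff), two threads can use `Signal` to enforce a specific interleaving; this is the same pattern the parallel cycle tests drive through the `Knobs` trait:
use std::sync::Arc;
fn two_stage_handshake() {
    let signal = Arc::new(Signal::default());
    let worker = std::thread::spawn({
        let signal = Arc::clone(&signal);
        move || {
            signal.wait_for(1); // block until the main thread reaches stage 1
            signal.signal(2); // then unblock the main thread
        }
    });
    signal.signal(1);
    signal.wait_for(2);
    worker.join().unwrap();
}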

View file

@ -1,66 +1,264 @@
//! Debugging APIs: these are meant for use when unit-testing or
//! debugging your application but aren't ordinarily needed.
use crate::durability::Durability;
use crate::plumbing::QueryStorageOps;
use crate::Query;
use crate::QueryTable;
use std::iter::FromIterator;
/// Additional methods on queries that can be used to "peek into"
/// their current state. These methods are meant for debugging and
/// observing the effects of garbage collection etc.
pub trait DebugQueryTable {
    /// Key of this query.
    type Key;
    /// Value of this query.
    type Value;
    /// Returns a lower bound on the durability for the given key.
    /// This is typically the minimum durability of all values that
    /// the query accessed, but we may return a lower durability in
    /// some cases.
    fn durability(&self, key: Self::Key) -> Durability;
    /// Get the (current) set of the entries in the query table.
    fn entries<C>(&self) -> C
    where
        C: FromIterator<TableEntry<Self::Key, Self::Value>>;
}
/// An entry from a query table, for debugging and inspecting the table state.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[non_exhaustive]
pub struct TableEntry<K, V> {
    /// key of the query
    pub key: K,
    /// value of the query, if it is stored
    pub value: Option<V>,
}
impl<K, V> TableEntry<K, V> {
    pub(crate) fn new(key: K, value: Option<V>) -> TableEntry<K, V> {
        TableEntry { key, value }
    }
}
impl<Q> DebugQueryTable for QueryTable<'_, Q>
where
    Q: Query,
    Q::Storage: QueryStorageOps<Q>,
{
    type Key = Q::Key;
    type Value = Q::Value;
    fn durability(&self, key: Q::Key) -> Durability {
        self.storage.durability(self.db, &key)
    }
    fn entries<C>(&self) -> C
    where
        C: FromIterator<TableEntry<Self::Key, Self::Value>>,
    {
        self.storage.entries(self.db)
    }
}
use std::{
    collections::{HashMap, HashSet},
    fmt,
    rc::Rc,
    sync::Arc,
};
use crate::database::AsSalsaDatabase;
/// `DebugWithDb` is a version of the traditional [`Debug`](`std::fmt::Debug`)
/// trait that gives access to the salsa database, allowing tracked
/// structs to print the values of their fields. It is typically not used
/// directly, instead you should write (e.g.) `format!("{:?}", foo.debug(db))`.
/// Implementations are automatically provided for `#[salsa::tracked]`
/// items, though you can opt-out from that if you wish to provide a manual
/// implementation.
///
/// # WARNING: Intended for debug use only!
///
/// Debug print-outs of tracked structs include the value of all their fields,
/// but the reads of those fields are ignored by salsa. This avoids creating
/// spurious dependencies from debugging code, but if you use the resulting
/// string to influence the outputs (return value, accumulators, etc) from your
/// query, salsa's dependency tracking will be undermined.
///
/// If for some reason you *want* to incorporate dependency output into
/// your query, do not use the `debug` or `into_debug` helpers and instead
/// invoke `fmt` manually.
pub trait DebugWithDb<Db: ?Sized + AsSalsaDatabase> {
    /// Creates a wrapper type that implements `Debug` but which
    /// uses the `DebugWithDb::fmt`.
    ///
    /// # WARNING: Intended for debug use only!
    ///
    /// The wrapper type Debug impl will access the value of all
    /// fields but those accesses are ignored by salsa. This is only
    /// suitable for debug output. See [`DebugWithDb`][] trait comment
    /// for more details.
    fn debug<'me, 'db>(&'me self, db: &'me Db) -> DebugWith<'me, Db>
    where
        Self: Sized + 'me,
    {
        DebugWith {
            value: BoxRef::Ref(self),
            db,
        }
    }
/// Creates a wrapper type that implements `Debug` but which
/// uses the `DebugWithDb::fmt`.
///
/// # WARNING: Intended for debug use only!
///
/// The wrapper type Debug impl will access the value of all
/// fields but those accesses are ignored by salsa. This is only
/// suitable for debug output. See [`DebugWithDb`][] trait comment
/// for more details.
fn into_debug<'me, 'db>(self, db: &'me Db) -> DebugWith<'me, Db>
where
Self: Sized + 'me,
{
DebugWith {
value: BoxRef::Box(Box::new(self)),
db,
}
}
/// Format `self` given the database `db`.
///
/// # Dependency tracking
///
/// When invoked manually, field accesses that occur
/// within this method are tracked by salsa. But when invoked via
/// the [`DebugWith`][] value returned by the [`debug`](`Self::debug`)
/// and [`into_debug`][`Self::into_debug`] methods,
/// those accesses are ignored.
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result;
}
/// Helper type for the [`DebugWithDb`][] trait that
/// wraps a value and implements [`std::fmt::Debug`][],
/// redirecting calls to the `fmt` method from [`DebugWithDb`][].
///
/// # WARNING: Intended for debug use only!
///
/// This type intentionally ignores salsa dependencies used
/// to generate the debug output. See the [`DebugWithDb`][] trait
/// for more notes on this.
pub struct DebugWith<'me, Db: ?Sized + AsSalsaDatabase> {
value: BoxRef<'me, dyn DebugWithDb<Db> + 'me>,
db: &'me Db,
}
enum BoxRef<'me, T: ?Sized> {
Box(Box<T>),
Ref(&'me T),
}
impl<T: ?Sized> std::ops::Deref for BoxRef<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match self {
BoxRef::Box(b) => b,
BoxRef::Ref(r) => r,
}
}
}
impl<Db: ?Sized> fmt::Debug for DebugWith<'_, Db>
where
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let db = self.db.as_salsa_database();
db.runtime()
.debug_probe(|| DebugWithDb::fmt(&*self.value, f, self.db))
}
}
impl<Db: ?Sized, T: ?Sized> DebugWithDb<Db> for &T
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T: ?Sized> DebugWithDb<Db> for Box<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T> DebugWithDb<Db> for Rc<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T: ?Sized> DebugWithDb<Db> for Arc<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
T::fmt(self, f, db)
}
}
impl<Db: ?Sized, T> DebugWithDb<Db> for Vec<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let elements = self.iter().map(|e| e.debug(db));
f.debug_list().entries(elements).finish()
}
}
impl<Db: ?Sized, T> DebugWithDb<Db> for Option<T>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let me = self.as_ref().map(|v| v.debug(db));
fmt::Debug::fmt(&me, f)
}
}
impl<Db: ?Sized, K, V, S> DebugWithDb<Db> for HashMap<K, V, S>
where
K: DebugWithDb<Db>,
V: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let elements = self.iter().map(|(k, v)| (k.debug(db), v.debug(db)));
f.debug_map().entries(elements).finish()
}
}
impl<Db: ?Sized, A, B> DebugWithDb<Db> for (A, B)
where
A: DebugWithDb<Db>,
B: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
f.debug_tuple("")
.field(&self.0.debug(db))
.field(&self.1.debug(db))
.finish()
}
}
impl<Db: ?Sized, A, B, C> DebugWithDb<Db> for (A, B, C)
where
A: DebugWithDb<Db>,
B: DebugWithDb<Db>,
C: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
f.debug_tuple("")
.field(&self.0.debug(db))
.field(&self.1.debug(db))
.field(&self.2.debug(db))
.finish()
}
}
impl<Db: ?Sized, V, S> DebugWithDb<Db> for HashSet<V, S>
where
V: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>, db: &Db) -> fmt::Result {
let elements = self.iter().map(|e| e.debug(db));
f.debug_list().entries(elements).finish()
}
}
/// This is used by the macro-generated code.
/// If the field type implements `DebugWithDb`, this uses that; otherwise, it falls back to `Debug`.
/// That's the "has impl" trick (https://github.com/nvzqz/impls#how-it-works).
#[doc(hidden)]
pub mod helper {
use super::{AsSalsaDatabase, DebugWith, DebugWithDb};
use std::{fmt, marker::PhantomData};
pub trait Fallback<T: fmt::Debug, Db: ?Sized> {
fn salsa_debug<'a>(a: &'a T, _db: &Db) -> &'a dyn fmt::Debug {
a
}
}
impl<Everything, Db: ?Sized, T: fmt::Debug> Fallback<T, Db> for Everything {}
pub struct SalsaDebug<T, Db: ?Sized>(PhantomData<T>, PhantomData<Db>);
impl<T, Db: ?Sized> SalsaDebug<T, Db>
where
T: DebugWithDb<Db>,
Db: AsSalsaDatabase,
{
#[allow(dead_code)]
pub fn salsa_debug<'a>(a: &'a T, db: &'a Db) -> DebugWith<'a, Db> {
a.debug(db)
}
}
}
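A short usage sketch (the `MyTracked` struct and the `Db` trait here are hypothetical stand-ins, not part of this diff): the wrapper returned by `debug` implements plain `Debug`, so it composes with `format!`/`println!` and with the container impls above.
fn dump(db: &dyn Db, a: MyTracked, b: MyTracked) {
    // Field reads performed inside `fmt` are ignored by salsa here,
    // so debug output cannot create spurious dependencies.
    eprintln!("{:?}", a.debug(db));
    // Containers forward to their elements' `DebugWithDb` impls.
    eprintln!("{:?}", vec![a, b].debug(db));
}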

View file

@ -1,241 +0,0 @@
use crate::debug::TableEntry;
use crate::durability::Durability;
use crate::plumbing::DerivedQueryStorageOps;
use crate::plumbing::LruQueryStorageOps;
use crate::plumbing::QueryFunction;
use crate::plumbing::QueryStorageMassOps;
use crate::plumbing::QueryStorageOps;
use crate::runtime::local_state::QueryInputs;
use crate::runtime::local_state::QueryRevisions;
use crate::Runtime;
use crate::{Database, DatabaseKeyIndex, QueryDb, Revision};
use std::borrow::Borrow;
use std::hash::Hash;
use std::marker::PhantomData;
mod execute;
mod fetch;
mod key_to_key_index;
mod lru;
mod maybe_changed_after;
mod memo;
mod sync;
//mod slot;
//use slot::Slot;
/// Memoized queries store the result plus a list of the other queries
/// that they invoked. This means we can avoid recomputing them when
/// none of those inputs have changed.
pub type MemoizedStorage<Q> = DerivedStorage<Q, AlwaysMemoizeValue>;
/// "Dependency" queries just track their dependencies and not the
/// actual value (which they produce on demand). This lessens the
/// storage requirements.
pub type DependencyStorage<Q> = DerivedStorage<Q, NeverMemoizeValue>;
/// Handles storage where the value is 'derived' by executing a
/// function (in contrast to "inputs").
pub struct DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
{
group_index: u16,
lru: lru::Lru,
key_map: key_to_key_index::KeyToKeyIndex<Q::Key>,
memo_map: memo::MemoMap<Q::Value>,
sync_map: sync::SyncMap,
policy: PhantomData<MP>,
}
type DerivedKeyIndex = u32;
impl<Q, MP> std::panic::RefUnwindSafe for DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
Q::Key: std::panic::RefUnwindSafe,
Q::Value: std::panic::RefUnwindSafe,
{
}
pub trait MemoizationPolicy<Q>: Send + Sync
where
Q: QueryFunction,
{
fn should_memoize_value(key: &Q::Key) -> bool;
fn memoized_value_eq(old_value: &Q::Value, new_value: &Q::Value) -> bool;
}
pub enum AlwaysMemoizeValue {}
impl<Q> MemoizationPolicy<Q> for AlwaysMemoizeValue
where
Q: QueryFunction,
Q::Value: Eq,
{
fn should_memoize_value(_key: &Q::Key) -> bool {
true
}
fn memoized_value_eq(old_value: &Q::Value, new_value: &Q::Value) -> bool {
old_value == new_value
}
}
pub enum NeverMemoizeValue {}
impl<Q> MemoizationPolicy<Q> for NeverMemoizeValue
where
Q: QueryFunction,
{
fn should_memoize_value(_key: &Q::Key) -> bool {
false
}
fn memoized_value_eq(_old_value: &Q::Value, _new_value: &Q::Value) -> bool {
panic!("cannot reach since we never memoize")
}
}
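Both provided policies are uninhabited enums, selected purely at the type level by the generated code. Purely as an illustration (salsa defines no such policy), a custom policy that gates memoization on the key could look like this:
// Hypothetical: memoize only even keys; odd keys just track dependencies.
enum MemoizeEvenKeys {}
impl<Q> MemoizationPolicy<Q> for MemoizeEvenKeys
where
    Q: QueryFunction<Key = u32>,
    Q::Value: Eq,
{
    fn should_memoize_value(key: &u32) -> bool {
        key % 2 == 0
    }
    fn memoized_value_eq(old_value: &Q::Value, new_value: &Q::Value) -> bool {
        old_value == new_value
    }
}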
impl<Q, MP> DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
{
fn database_key_index(&self, key_index: DerivedKeyIndex) -> DatabaseKeyIndex {
DatabaseKeyIndex {
group_index: self.group_index,
query_index: Q::QUERY_INDEX,
key_index,
}
}
fn assert_our_key_index(&self, index: DatabaseKeyIndex) {
assert_eq!(index.group_index, self.group_index);
assert_eq!(index.query_index, Q::QUERY_INDEX);
}
fn key_index(&self, index: DatabaseKeyIndex) -> DerivedKeyIndex {
self.assert_our_key_index(index);
index.key_index
}
}
impl<Q, MP> QueryStorageOps<Q> for DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
{
const CYCLE_STRATEGY: crate::plumbing::CycleRecoveryStrategy = Q::CYCLE_STRATEGY;
fn new(group_index: u16) -> Self {
DerivedStorage {
group_index,
lru: Default::default(),
key_map: Default::default(),
memo_map: Default::default(),
sync_map: Default::default(),
policy: PhantomData,
}
}
fn fmt_index(
&self,
_db: &<Q as QueryDb<'_>>::DynDb,
index: DatabaseKeyIndex,
fmt: &mut std::fmt::Formatter<'_>,
) -> std::fmt::Result {
let key_index = self.key_index(index);
let key = self.key_map.key_for_key_index(key_index);
write!(fmt, "{}({:?})", Q::QUERY_NAME, key)
}
fn maybe_changed_after(
&self,
db: &<Q as QueryDb<'_>>::DynDb,
database_key_index: DatabaseKeyIndex,
revision: Revision,
) -> bool {
debug_assert!(revision < db.salsa_runtime().current_revision());
let key_index = self.key_index(database_key_index);
self.maybe_changed_after(db, key_index, revision)
}
fn fetch(&self, db: &<Q as QueryDb<'_>>::DynDb, key: &Q::Key) -> Q::Value {
let key_index = self.key_map.key_index_for_key(key);
self.fetch(db, key_index)
}
fn durability(&self, _db: &<Q as QueryDb<'_>>::DynDb, key: &Q::Key) -> Durability {
let key_index = self.key_map.key_index_for_key(key);
if let Some(memo) = self.memo_map.get(key_index) {
memo.revisions.durability
} else {
Durability::LOW
}
}
fn entries<C>(&self, _db: &<Q as QueryDb<'_>>::DynDb) -> C
where
C: std::iter::FromIterator<TableEntry<Q::Key, Q::Value>>,
{
self.memo_map
.iter()
.map(|(key_index, memo)| {
let key = self.key_map.key_for_key_index(key_index);
TableEntry::new(key, memo.value.clone())
})
.collect()
}
}
impl<Q, MP> QueryStorageMassOps for DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
{
fn purge(&self) {
self.lru.set_capacity(0);
self.memo_map.clear();
}
}
impl<Q, MP> LruQueryStorageOps for DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
{
fn set_lru_capacity(&self, new_capacity: usize) {
self.lru.set_capacity(new_capacity);
}
}
impl<Q, MP> DerivedQueryStorageOps<Q> for DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
{
fn invalidate<S>(&self, runtime: &mut Runtime, key: &S)
where
S: Eq + Hash,
Q::Key: Borrow<S>,
{
runtime.with_incremented_revision(|new_revision| {
let key_index = self.key_map.existing_key_index_for_key(key)?;
let memo = self.memo_map.get(key_index)?;
let invalidated_revisions = QueryRevisions {
changed_at: new_revision,
durability: memo.revisions.durability,
inputs: QueryInputs::Untracked,
};
let new_memo = memo::Memo::new(
memo.value.clone(),
memo.verified_at.load(),
invalidated_revisions,
);
self.memo_map.insert(key_index, new_memo);
Some(memo.revisions.durability)
})
}
}

View file

@ -1,134 +0,0 @@
use std::sync::Arc;
use crate::{
plumbing::QueryFunction,
runtime::{local_state::ActiveQueryGuard, StampedValue},
Cycle, Database, Event, EventKind, QueryDb,
};
use super::{memo::Memo, DerivedStorage, MemoizationPolicy};
impl<Q, MP> DerivedStorage<Q, MP>
where
Q: QueryFunction,
MP: MemoizationPolicy<Q>,
{
/// Executes the query function for the given `active_query`. Creates and stores
/// a new memo with the result, backdated if possible. Once this completes,
/// the query will have been popped off the active query stack.
///
/// # Parameters
///
/// * `db`, the database.
/// * `active_query`, the active stack frame for the query to execute.
/// * `opt_old_memo`, the older memo, if any existed. Used for backdating.
pub(super) fn execute(
&self,
db: &<Q as QueryDb<'_>>::DynDb,
active_query: ActiveQueryGuard<'_>,
opt_old_memo: Option<Arc<Memo<Q::Value>>>,
) -> StampedValue<Q::Value> {
let runtime = db.salsa_runtime();
let revision_now = runtime.current_revision();
let database_key_index = active_query.database_key_index;
log::info!("{:?}: executing query", database_key_index.debug(db));
db.salsa_event(Event {
runtime_id: db.salsa_runtime().id(),
kind: EventKind::WillExecute {
database_key: database_key_index,
},
});
// Query was not previously executed, or value is potentially
// stale, or value is absent. Let's execute!
let database_key_index = active_query.database_key_index;
let key_index = database_key_index.key_index;
let key = self.key_map.key_for_key_index(key_index);
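// `Cycle::catch` runs the query and intercepts the special panic payload
// salsa uses to unwind the participants of a cycle; any other panic
// propagates normally.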
let value = match Cycle::catch(|| Q::execute(db, key.clone())) {
Ok(v) => v,
Err(cycle) => {
log::debug!(
"{:?}: caught cycle {:?}, have strategy {:?}",
database_key_index.debug(db),
cycle,
Q::CYCLE_STRATEGY,
);
match Q::CYCLE_STRATEGY {
crate::plumbing::CycleRecoveryStrategy::Panic => cycle.throw(),
crate::plumbing::CycleRecoveryStrategy::Fallback => {
if let Some(c) = active_query.take_cycle() {
assert!(c.is(&cycle));
Q::cycle_fallback(db, &cycle, &key)
} else {
// we are not a participant in this cycle
debug_assert!(!cycle
.participant_keys()
.any(|k| k == database_key_index));
cycle.throw()
}
}
}
}
};
let mut revisions = active_query.pop();
// We assume that the query is side-effect free -- that is, it does
// not mutate the "inputs" to the query system. Sanity check
// that assumption here, at least to the best of our ability.
assert_eq!(
runtime.current_revision(),
revision_now,
"revision altered during query execution",
);
// If the new value is equal to the old one, then it didn't
// really change, even if some of its inputs have. So we can
// "backdate" its `changed_at` revision to be the same as the
// old value.
if let Some(old_memo) = &opt_old_memo {
if let Some(old_value) = &old_memo.value {
// Careful: if the value became less durable than it
// used to be, that is a "breaking change" that our
// consumers must be aware of. Becoming *more* durable
// is not. See the test `constant_to_non_constant`.
if revisions.durability >= old_memo.revisions.durability
&& MP::memoized_value_eq(old_value, &value)
{
log::debug!(
"{:?}: read_upgrade: value is equal, back-dating to {:?}",
database_key_index.debug(db),
old_memo.revisions.changed_at,
);
assert!(old_memo.revisions.changed_at <= revisions.changed_at);
revisions.changed_at = old_memo.revisions.changed_at;
}
}
}
let stamped_value = revisions.stamped_value(value);
log::debug!(
"{:?}: read_upgrade: result.revisions = {:#?}",
database_key_index.debug(db),
revisions
);
self.memo_map.insert(
key_index,
Memo::new(
if MP::should_memoize_value(&key) {
Some(stamped_value.value.clone())
} else {
None
},
revision_now,
revisions,
),
);
stamped_value
}
}
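Backdating is easiest to see with two chained queries. A hypothetical sketch in the style of the tests in this diff (these functions appear nowhere in salsa itself):
#[salsa::tracked(jar = Jar)]
fn parity(db: &dyn Db, input: MyInput) -> u32 {
    input.field(db) % 2
}
#[salsa::tracked(jar = Jar)]
fn is_even(db: &dyn Db, input: MyInput) -> bool {
    parity(db, input) == 0
}
// Setting the input from 2 to 4 forces `parity` to re-execute, but it produces
// the same value (0). `memoized_value_eq` detects this, `changed_at` is
// backdated to the old revision, and `is_even` is validated without running.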

Some files were not shown because too many files have changed in this diff.