Remove unnecessary uses of DashMap and Arc (#3413)

## Summary

All of the resolver code runs on the main thread, so many of the
`Send` bounds and uses of `DashMap` and `Arc` are unnecessary. We could
also switch to single-threaded versions of `Mutex` and `Notify` in
some places, but there isn't a crate providing those that I would
be comfortable using.
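
For intuition, here is a minimal, self-contained sketch (not part of this PR) of the property the change relies on: on a current-thread runtime driving a `LocalSet`, spawned futures only need to be `'static`, not `Send`, so plain `Rc`/`RefCell` state works.

```rust
use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    let runtime = tokio::runtime::Builder::new_current_thread()
        .build()
        .unwrap();
    let local = tokio::task::LocalSet::new();
    // `Rc<RefCell<..>>` is `!Send`, which is fine on a single thread.
    let state = Rc::new(RefCell::new(0u32));

    let task_state = Rc::clone(&state);
    local.block_on(&runtime, async move {
        // `spawn_local` only requires `Future + 'static`, not `+ Send`.
        tokio::task::spawn_local(async move {
            *task_state.borrow_mut() += 1;
        })
        .await
        .unwrap();
    });
    assert_eq!(*state.borrow(), 1);
}
```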

The `Arc` in `OnceMap` can't easily be removed because of the uv-auth
code, which uses the
[reqwest-middleware](https://docs.rs/reqwest-middleware/latest/reqwest_middleware/trait.Middleware.html)
crate; that crate seems to add unnecessary `Send` bounds because of
`async-trait`. We could duplicate the code and create a `OnceMapLocal`
variant, but I don't feel that's worth it.
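
For reference, here's roughly what gets in the way, sketched with the `async-trait` crate (illustrative trait names, not the actual `reqwest-middleware` definitions): the macro's default expansion boxes each future as `dyn Future + Send`, so anything an implementation captures across an `.await` must be `Send`, which rules out `Rc`-backed types.

```rust
use async_trait::async_trait;

// Default expansion: each async fn becomes roughly
//   fn handle<'a>(&'a self) -> Pin<Box<dyn Future<Output = String> + Send + 'a>>
// so implementations cannot hold `Rc` (or any `!Send` type) across an await.
#[async_trait]
trait Middleware {
    async fn handle(&self) -> String;
}

// The `?Send` form drops the bound, but that only helps for traits we define
// ourselves; a third-party trait like `reqwest_middleware::Middleware` has
// already chosen the `Send` form.
#[async_trait(?Send)]
trait LocalMiddleware {
    async fn handle(&self) -> String;
}

fn main() {}
```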
Ibraheem Ahmed · 2024-05-06 22:30:43 -04:00 · committed by GitHub
parent 2c84af15b8 · commit 94cf604574
18 changed files with 165 additions and 145 deletions


```diff
@@ -7,8 +7,8 @@ use std::fmt::{Display, Formatter};
 use std::io;
 use std::path::{Path, PathBuf};
 use std::process::{ExitStatus, Output};
+use std::rc::Rc;
 use std::str::FromStr;
-use std::sync::Arc;
 use std::{env, iter};
 
 use fs_err as fs;
@@ -335,13 +335,13 @@ impl Pep517Backend {
     }
 }
 
-/// Uses an [`Arc`] internally, clone freely.
+/// Uses an [`Rc`] internally, clone freely.
 #[derive(Debug, Default, Clone)]
 pub struct SourceBuildContext {
     /// An in-memory resolution of the default backend's requirements for PEP 517 builds.
-    default_resolution: Arc<Mutex<Option<Resolution>>>,
+    default_resolution: Rc<Mutex<Option<Resolution>>>,
     /// An in-memory resolution of the build requirements for `--legacy-setup-py` builds.
-    setup_py_resolution: Arc<Mutex<Option<Resolution>>>,
+    setup_py_resolution: Rc<Mutex<Option<Resolution>>>,
 }
 
 /// Holds the state through a series of PEP 517 frontend to backend calls or a single setup.py
```


```diff
@@ -30,7 +30,7 @@ use crate::{
 /// `CachedClient::get_cacheable`. If your types fit into the
 /// `rkyvutil::OwnedArchive` mold, then an implementation of `Cacheable` is
 /// already provided for that type.
-pub trait Cacheable: Sized + Send {
+pub trait Cacheable: Sized {
     /// This associated type permits customizing what the "output" type of
     /// deserialization is. It can be identical to `Self`.
     ///
@@ -54,7 +54,7 @@ pub struct SerdeCacheable<T> {
     inner: T,
 }
 
-impl<T: Send + Serialize + DeserializeOwned> Cacheable for SerdeCacheable<T> {
+impl<T: Serialize + DeserializeOwned> Cacheable for SerdeCacheable<T> {
     type Target = T;
 
     fn from_aligned_bytes(bytes: AlignedVec) -> Result<T, Error> {
@@ -75,7 +75,7 @@ impl<T: Send + Serialize + DeserializeOwned> Cacheable for SerdeCacheable<T> {
 /// All `OwnedArchive` values are cacheable.
 impl<A> Cacheable for OwnedArchive<A>
 where
-    A: rkyv::Archive + rkyv::Serialize<crate::rkyvutil::Serializer<4096>> + Send,
+    A: rkyv::Archive + rkyv::Serialize<crate::rkyvutil::Serializer<4096>>,
     A::Archived: for<'a> rkyv::CheckBytes<rkyv::validation::validators::DefaultValidator<'a>>
         + rkyv::Deserialize<A, rkyv::de::deserializers::SharedDeserializeMap>,
 {
@@ -179,7 +179,7 @@ impl CachedClient {
     /// allowed to make subsequent requests, e.g. through the uncached client.
     #[instrument(skip_all)]
     pub async fn get_serde<
-        Payload: Serialize + DeserializeOwned + Send + 'static,
+        Payload: Serialize + DeserializeOwned + 'static,
         CallBackError,
         Callback,
         CallbackReturn,
@@ -191,8 +191,8 @@ impl CachedClient {
         response_callback: Callback,
     ) -> Result<Payload, CachedClientError<CallBackError>>
     where
-        Callback: FnOnce(Response) -> CallbackReturn + Send,
-        CallbackReturn: Future<Output = Result<Payload, CallBackError>> + Send,
+        Callback: FnOnce(Response) -> CallbackReturn,
+        CallbackReturn: Future<Output = Result<Payload, CallBackError>>,
     {
         let payload = self
             .get_cacheable(req, cache_entry, cache_control, move |resp| async {
@@ -225,11 +225,15 @@ impl CachedClient {
     ) -> Result<Payload::Target, CachedClientError<CallBackError>>
     where
         Callback: FnOnce(Response) -> CallbackReturn,
-        CallbackReturn: Future<Output = Result<Payload, CallBackError>> + Send,
+        CallbackReturn: Future<Output = Result<Payload, CallBackError>>,
     {
         let fresh_req = req.try_clone().expect("HTTP request must be cloneable");
         let cached_response = match Self::read_cache(cache_entry).await {
-            Some(cached) => self.send_cached(req, cache_control, cached).boxed().await?,
+            Some(cached) => {
+                self.send_cached(req, cache_control, cached)
+                    .boxed_local()
+                    .await?
+            }
             None => {
                 debug!("No cache entry for: {}", req.url());
                 let (response, cache_policy) = self.fresh_request(req).await?;
@@ -301,7 +305,7 @@ impl CachedClient {
     /// Make a request without checking whether the cache is fresh.
     pub async fn skip_cache<
-        Payload: Serialize + DeserializeOwned + Send + 'static,
+        Payload: Serialize + DeserializeOwned + 'static,
         CallBackError,
         Callback,
         CallbackReturn,
@@ -312,8 +316,8 @@ impl CachedClient {
         response_callback: Callback,
     ) -> Result<Payload, CachedClientError<CallBackError>>
     where
-        Callback: FnOnce(Response) -> CallbackReturn + Send,
-        CallbackReturn: Future<Output = Result<Payload, CallBackError>> + Send,
+        Callback: FnOnce(Response) -> CallbackReturn,
+        CallbackReturn: Future<Output = Result<Payload, CallBackError>>,
     {
         let (response, cache_policy) = self.fresh_request(req).await?;
@@ -335,7 +339,7 @@ impl CachedClient {
     ) -> Result<Payload::Target, CachedClientError<CallBackError>>
     where
         Callback: FnOnce(Response) -> CallbackReturn,
-        CallbackReturn: Future<Output = Result<Payload, CallBackError>> + Send,
+        CallbackReturn: Future<Output = Result<Payload, CallBackError>>,
     {
         let _ = fs_err::tokio::remove_file(&cache_entry.path()).await;
         let (response, cache_policy) = self.fresh_request(req).await?;
@@ -352,11 +356,11 @@ impl CachedClient {
     ) -> Result<Payload::Target, CachedClientError<CallBackError>>
     where
         Callback: FnOnce(Response) -> CallbackReturn,
-        CallbackReturn: Future<Output = Result<Payload, CallBackError>> + Send,
+        CallbackReturn: Future<Output = Result<Payload, CallBackError>>,
     {
         let new_cache = info_span!("new_cache", file = %cache_entry.path().display());
         let data = response_callback(response)
-            .boxed()
+            .boxed_local()
             .await
             .map_err(|err| CachedClientError::Callback(err))?;
         let Some(cache_policy) = cache_policy else {
```
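
The `.boxed()` → `.boxed_local()` swaps above come down to the two signatures in `futures`: `FutureExt::boxed` requires `Self: Send` and returns a `BoxFuture`, while `boxed_local` drops that requirement and returns a `LocalBoxFuture`. A minimal sketch:

```rust
use std::rc::Rc;

use futures::future::{BoxFuture, FutureExt, LocalBoxFuture};

// Fine with `.boxed()`: the future is `Send`.
fn boxed_send() -> BoxFuture<'static, u32> {
    async { 42 }.boxed()
}

// `.boxed()` would not compile here: capturing an `Rc` makes the future `!Send`.
fn boxed_non_send() -> LocalBoxFuture<'static, u32> {
    let value = Rc::new(42);
    async move { *value }.boxed_local()
}

fn main() {
    let _ = (boxed_send(), boxed_non_send());
}
```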


```diff
@@ -166,7 +166,7 @@ impl<'a> FlatIndexClient<'a> {
                 .collect();
             Ok::<Vec<File>, CachedClientError<Error>>(files)
         }
-        .boxed()
+        .boxed_local()
         .instrument(info_span!("parse_flat_index_html", url = % url))
     };
     let response = self
```


```diff
@@ -345,7 +345,7 @@ impl RegistryClient {
             };
             OwnedArchive::from_unarchived(&unarchived)
         }
-        .boxed()
+        .boxed_local()
         .instrument(info_span!("parse_simple_api", package = %package_name))
     };
     let result = self
@@ -534,7 +534,7 @@ impl RegistryClient {
             })?;
             Ok::<Metadata23, CachedClientError<Error>>(metadata)
         }
-        .boxed()
+        .boxed_local()
         .instrument(info_span!("read_metadata_range_request", wheel = %filename))
     };
```
}; };


```diff
@@ -314,7 +314,7 @@ impl<'a> BuildContext for BuildDispatch<'a> {
             build_kind,
             self.build_extra_env_vars.clone(),
         )
-        .boxed()
+        .boxed_local()
         .await?;
         Ok(builder)
     }
```


```diff
@@ -1,5 +1,6 @@
 use std::io;
 use std::path::Path;
+use std::rc::Rc;
 use std::sync::Arc;
 
 use futures::{FutureExt, TryStreamExt};
@@ -41,20 +42,20 @@ use crate::{ArchiveMetadata, Error, LocalWheel, Reporter, SourceDistributionBuil
 ///
 /// This struct also has the task of acquiring locks around source dist builds in general and git
 /// operation especially.
-pub struct DistributionDatabase<'a, Context: BuildContext + Send + Sync> {
+pub struct DistributionDatabase<'a, Context: BuildContext> {
     client: &'a RegistryClient,
     build_context: &'a Context,
     builder: SourceDistributionBuilder<'a, Context>,
-    locks: Arc<Locks>,
+    locks: Rc<Locks>,
 }
 
-impl<'a, Context: BuildContext + Send + Sync> DistributionDatabase<'a, Context> {
+impl<'a, Context: BuildContext> DistributionDatabase<'a, Context> {
     pub fn new(client: &'a RegistryClient, build_context: &'a Context) -> Self {
         Self {
             client,
             build_context,
             builder: SourceDistributionBuilder::new(client, build_context),
-            locks: Arc::new(Locks::default()),
+            locks: Rc::new(Locks::default()),
         }
     }
@@ -307,7 +308,7 @@ impl<'a, Context: BuildContext + Send + Sync> DistributionDatabase<'a, Context>
         let built_wheel = self
             .builder
             .download_and_build(&BuildableSource::Dist(dist), tags, hashes)
-            .boxed()
+            .boxed_local()
             .await?;
 
         // If the wheel was unzipped previously, respect it. Source distributions are
@@ -360,7 +361,7 @@ impl<'a, Context: BuildContext + Send + Sync> DistributionDatabase<'a, Context>
             return Ok(ArchiveMetadata { metadata, hashes });
         }
 
-        match self.client.wheel_metadata(dist).boxed().await {
+        match self.client.wheel_metadata(dist).boxed_local().await {
             Ok(metadata) => Ok(ArchiveMetadata::from(metadata)),
             Err(err) if err.is_http_streaming_unsupported() => {
                 warn!("Streaming unsupported when fetching metadata for {dist}; downloading wheel directly ({err})");
@@ -404,7 +405,7 @@ impl<'a, Context: BuildContext + Send + Sync> DistributionDatabase<'a, Context>
         let metadata = self
             .builder
             .download_and_build_metadata(source, hashes)
-            .boxed()
+            .boxed_local()
             .await?;
 
         Ok(metadata)
     }
```


```diff
@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::rc::Rc;
 
 use rustc_hash::FxHashMap;
 use tokio::sync::Mutex;
@@ -7,14 +7,14 @@ use distribution_types::{Identifier, ResourceId};
 /// A set of locks used to prevent concurrent access to the same resource.
 #[derive(Debug, Default)]
-pub(crate) struct Locks(Mutex<FxHashMap<ResourceId, Arc<Mutex<()>>>>);
+pub(crate) struct Locks(Mutex<FxHashMap<ResourceId, Rc<Mutex<()>>>>);
 
 impl Locks {
     /// Acquire a lock on the given resource.
-    pub(crate) async fn acquire(&self, dist: &impl Identifier) -> Arc<Mutex<()>> {
+    pub(crate) async fn acquire(&self, dist: &impl Identifier) -> Rc<Mutex<()>> {
         let mut map = self.0.lock().await;
         map.entry(dist.resource_id())
-            .or_insert_with(|| Arc::new(Mutex::new(())))
+            .or_insert_with(|| Rc::new(Mutex::new(())))
             .clone()
     }
 }
```
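
For context, a standalone sketch of the pattern this file implements, with plain `HashMap`/`String` standing in for `FxHashMap`/`ResourceId`: one async `Mutex` per resource, with the handle shared via `Rc` now that everything runs on one thread.

```rust
use std::collections::HashMap;
use std::rc::Rc;

use tokio::sync::Mutex;

#[derive(Default)]
struct Locks(Mutex<HashMap<String, Rc<Mutex<()>>>>);

impl Locks {
    // Look up (or create) the per-resource lock and hand out a cheap Rc clone.
    async fn acquire(&self, id: &str) -> Rc<Mutex<()>> {
        let mut map = self.0.lock().await;
        map.entry(id.to_string())
            .or_insert_with(|| Rc::new(Mutex::new(())))
            .clone()
    }
}

async fn demo(locks: &Locks) {
    let lock = locks.acquire("pkg-1.0").await;
    let _guard = lock.lock().await; // serializes work on the same resource
}
```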


```diff
@@ -116,7 +116,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     tags,
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await;
             }
         };
@@ -130,7 +130,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     tags,
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await?
             }
             BuildableSource::Dist(SourceDist::DirectUrl(dist)) => {
@@ -153,18 +153,18 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     tags,
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await?
             }
             BuildableSource::Dist(SourceDist::Git(dist)) => {
                 self.git(source, &GitSourceUrl::from(dist), tags, hashes)
-                    .boxed()
+                    .boxed_local()
                     .await?
             }
             BuildableSource::Dist(SourceDist::Path(dist)) => {
                 if dist.path.is_dir() {
                     self.source_tree(source, &PathSourceUrl::from(dist), tags, hashes)
-                        .boxed()
+                        .boxed_local()
                         .await?
                 } else {
                     let cache_shard = self
@@ -178,7 +178,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                         tags,
                         hashes,
                     )
-                    .boxed()
+                    .boxed_local()
                     .await?
                 }
             }
@@ -205,16 +205,18 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     tags,
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await?
             }
             BuildableSource::Url(SourceUrl::Git(resource)) => {
-                self.git(source, resource, tags, hashes).boxed().await?
+                self.git(source, resource, tags, hashes)
+                    .boxed_local()
+                    .await?
             }
             BuildableSource::Url(SourceUrl::Path(resource)) => {
                 if resource.path.is_dir() {
                     self.source_tree(source, resource, tags, hashes)
-                        .boxed()
+                        .boxed_local()
                         .await?
                 } else {
                     let cache_shard = self.build_context.cache().shard(
@@ -222,7 +224,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                         WheelCache::Path(resource.url).root(),
                     );
                     self.archive(source, resource, &cache_shard, tags, hashes)
-                        .boxed()
+                        .boxed_local()
                         .await?
                 }
             }
@@ -268,7 +270,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     &cache_shard,
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await;
             }
         };
@@ -281,7 +283,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     None,
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await?
             }
             BuildableSource::Dist(SourceDist::DirectUrl(dist)) => {
@@ -303,18 +305,18 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     subdirectory.as_deref(),
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await?
             }
             BuildableSource::Dist(SourceDist::Git(dist)) => {
                 self.git_metadata(source, &GitSourceUrl::from(dist), hashes)
-                    .boxed()
+                    .boxed_local()
                     .await?
             }
             BuildableSource::Dist(SourceDist::Path(dist)) => {
                 if dist.path.is_dir() {
                     self.source_tree_metadata(source, &PathSourceUrl::from(dist), hashes)
-                        .boxed()
+                        .boxed_local()
                         .await?
                 } else {
                     let cache_shard = self
@@ -322,7 +324,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                         .cache()
                         .shard(CacheBucket::BuiltWheels, WheelCache::Path(&dist.url).root());
                     self.archive_metadata(source, &PathSourceUrl::from(dist), &cache_shard, hashes)
-                        .boxed()
+                        .boxed_local()
                         .await?
                 }
             }
@@ -348,16 +350,18 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                     subdirectory.as_deref(),
                     hashes,
                 )
-                .boxed()
+                .boxed_local()
                 .await?
             }
             BuildableSource::Url(SourceUrl::Git(resource)) => {
-                self.git_metadata(source, resource, hashes).boxed().await?
+                self.git_metadata(source, resource, hashes)
+                    .boxed_local()
+                    .await?
             }
             BuildableSource::Url(SourceUrl::Path(resource)) => {
                 if resource.path.is_dir() {
                     self.source_tree_metadata(source, resource, hashes)
-                        .boxed()
+                        .boxed_local()
                         .await?
                 } else {
                     let cache_shard = self.build_context.cache().shard(
@@ -365,7 +369,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
                         WheelCache::Path(resource.url).root(),
                     );
                     self.archive_metadata(source, resource, &cache_shard, hashes)
-                        .boxed()
+                        .boxed_local()
                         .await?
                 }
             }
@@ -488,7 +492,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         // If the backend supports `prepare_metadata_for_build_wheel`, use it.
         if let Some(metadata) = self
            .build_metadata(source, source_dist_entry.path(), subdirectory)
-            .boxed()
+            .boxed_local()
            .await?
         {
            // Store the metadata.
@@ -569,7 +573,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
             Ok(revision.with_hashes(hashes))
         }
-        .boxed()
+        .boxed_local()
         .instrument(info_span!("download", source_dist = %source))
     };
     let req = self.request(url.clone())?;
@@ -706,7 +710,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         // If the backend supports `prepare_metadata_for_build_wheel`, use it.
         if let Some(metadata) = self
            .build_metadata(source, source_entry.path(), None)
-            .boxed()
+            .boxed_local()
            .await?
         {
            // Store the metadata.
@@ -903,7 +907,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         // If the backend supports `prepare_metadata_for_build_wheel`, use it.
         if let Some(metadata) = self
            .build_metadata(source, &resource.path, None)
-            .boxed()
+            .boxed_local()
            .await?
         {
            // Store the metadata.
@@ -1110,7 +1114,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         // If the backend supports `prepare_metadata_for_build_wheel`, use it.
         if let Some(metadata) = self
            .build_metadata(source, fetch.path(), subdirectory.as_deref())
-            .boxed()
+            .boxed_local()
            .await?
         {
            // Store the metadata.
```


```diff
@@ -37,7 +37,7 @@ pub enum Error {
 }
 
 /// Download, build, and unzip a set of distributions.
-pub struct Downloader<'a, Context: BuildContext + Send + Sync> {
+pub struct Downloader<'a, Context: BuildContext> {
     tags: &'a Tags,
     cache: &'a Cache,
     hashes: &'a HashStrategy,
@@ -45,7 +45,7 @@ pub struct Downloader<'a, Context: BuildContext + Send + Sync> {
     reporter: Option<Arc<dyn Reporter>>,
 }
 
-impl<'a, Context: BuildContext + Send + Sync> Downloader<'a, Context> {
+impl<'a, Context: BuildContext> Downloader<'a, Context> {
     pub fn new(
         cache: &'a Cache,
         tags: &'a Tags,
@@ -83,7 +83,7 @@ impl<'a, Context: BuildContext + Send + Sync> Downloader<'a, Context> {
     ) -> impl Stream<Item = Result<CachedDist, Error>> + 'stream {
         futures::stream::iter(distributions)
             .map(|dist| async {
-                let wheel = self.get_wheel(dist, in_flight).boxed().await?;
+                let wheel = self.get_wheel(dist, in_flight).boxed_local().await?;
                 if let Some(reporter) = self.reporter.as_ref() {
                     reporter.on_progress(&wheel);
                 }
@@ -174,7 +174,7 @@ impl<'a, Context: BuildContext + Send + Sync> Downloader<'a, Context> {
         let result = self
             .database
             .get_or_build_wheel(&dist, self.tags, policy)
-            .boxed()
+            .boxed_local()
             .map_err(|err| Error::Fetch(dist.clone(), err))
             .await
             .and_then(|wheel: LocalWheel| {
```


```diff
@@ -45,7 +45,7 @@ pub enum LookaheadError {
 /// possible because a direct URL points to a _specific_ version of a package, and so we know that
 /// any correct resolution will _have_ to include it (unlike with PyPI dependencies, which may
 /// require a range of versions and backtracking).
-pub struct LookaheadResolver<'a, Context: BuildContext + Send + Sync> {
+pub struct LookaheadResolver<'a, Context: BuildContext> {
     /// The direct requirements for the project.
     requirements: &'a [Requirement],
     /// The constraints for the project.
@@ -62,7 +62,7 @@ pub struct LookaheadResolver<'a, Context: BuildContext + Send + Sync> {
     database: DistributionDatabase<'a, Context>,
 }
 
-impl<'a, Context: BuildContext + Send + Sync> LookaheadResolver<'a, Context> {
+impl<'a, Context: BuildContext> LookaheadResolver<'a, Context> {
     /// Instantiate a new [`LookaheadResolver`] for a given set of requirements.
     #[allow(clippy::too_many_arguments)]
     pub fn new(
```


```diff
@@ -21,7 +21,7 @@ use crate::ExtrasSpecification;
 ///
 /// Used, e.g., to determine the input requirements when a user specifies a `pyproject.toml`
 /// file, which may require running PEP 517 build hooks to extract metadata.
-pub struct SourceTreeResolver<'a, Context: BuildContext + Send + Sync> {
+pub struct SourceTreeResolver<'a, Context: BuildContext> {
     /// The requirements for the project.
     source_trees: Vec<PathBuf>,
     /// The extras to include when resolving requirements.
@@ -34,7 +34,7 @@ pub struct SourceTreeResolver<'a, Context: BuildContext + Send + Sync> {
     database: DistributionDatabase<'a, Context>,
 }
 
-impl<'a, Context: BuildContext + Send + Sync> SourceTreeResolver<'a, Context> {
+impl<'a, Context: BuildContext> SourceTreeResolver<'a, Context> {
     /// Instantiate a new [`SourceTreeResolver`] for a given set of `source_trees`.
     pub fn new(
         source_trees: Vec<PathBuf>,
```


```diff
@@ -22,7 +22,7 @@ use uv_resolver::{InMemoryIndex, MetadataResponse};
 use uv_types::{BuildContext, HashStrategy};
 
 /// Like [`RequirementsSpecification`], but with concrete names for all requirements.
-pub struct NamedRequirementsResolver<'a, Context: BuildContext + Send + Sync> {
+pub struct NamedRequirementsResolver<'a, Context: BuildContext> {
     /// The requirements for the project.
     requirements: Vec<UnresolvedRequirementSpecification>,
     /// Whether to check hashes for distributions.
@@ -33,7 +33,7 @@ pub struct NamedRequirementsResolver<'a, Context: BuildContext + Send + Sync> {
     database: DistributionDatabase<'a, Context>,
 }
 
-impl<'a, Context: BuildContext + Send + Sync> NamedRequirementsResolver<'a, Context> {
+impl<'a, Context: BuildContext> NamedRequirementsResolver<'a, Context> {
     /// Instantiate a new [`NamedRequirementsResolver`] for a given set of requirements.
     pub fn new(
         requirements: Vec<UnresolvedRequirementSpecification>,
```


```diff
@@ -1,9 +1,9 @@
 use std::collections::{BTreeMap, BTreeSet};
 use std::fmt::Formatter;
 use std::ops::Deref;
+use std::rc::Rc;
 use std::sync::Arc;
 
-use dashmap::{DashMap, DashSet};
 use indexmap::IndexMap;
 use pubgrub::range::Range;
 use pubgrub::report::{DefaultStringReporter, DerivationTree, External, Reporter};
@@ -22,7 +22,9 @@ use crate::candidate_selector::CandidateSelector;
 use crate::dependency_provider::UvDependencyProvider;
 use crate::pubgrub::{PubGrubPackage, PubGrubPython, PubGrubReportFormatter};
 use crate::python_requirement::PythonRequirement;
-use crate::resolver::{IncompletePackage, UnavailablePackage, VersionsResponse};
+use crate::resolver::{
+    IncompletePackage, SharedMap, SharedSet, UnavailablePackage, VersionsResponse,
+};
 
 #[derive(Debug, thiserror::Error)]
 pub enum ResolveError {
@@ -236,8 +238,8 @@ impl NoSolutionError {
     pub(crate) fn with_available_versions(
         mut self,
         python_requirement: &PythonRequirement,
-        visited: &DashSet<PackageName>,
-        package_versions: &OnceMap<PackageName, Arc<VersionsResponse>>,
+        visited: &SharedSet<PackageName>,
+        package_versions: &OnceMap<PackageName, Rc<VersionsResponse>>,
     ) -> Self {
         let mut available_versions = IndexMap::default();
         for package in self.derivation_tree.packages() {
@@ -261,7 +263,7 @@ impl NoSolutionError {
             // tree, but were never visited during resolution. We _may_ have metadata for
             // these packages, but it's non-deterministic, and omitting them ensures that
             // we represent the state of the resolver at the time of failure.
-            if visited.contains(name) {
+            if visited.borrow().contains(name) {
                 if let Some(response) = package_versions.get(name) {
                     if let VersionsResponse::Found(ref version_maps) = *response {
                         for version_map in version_maps {
@@ -300,13 +302,13 @@ impl NoSolutionError {
     #[must_use]
     pub(crate) fn with_unavailable_packages(
         mut self,
-        unavailable_packages: &DashMap<PackageName, UnavailablePackage>,
+        unavailable_packages: &SharedMap<PackageName, UnavailablePackage>,
     ) -> Self {
+        let unavailable_packages = unavailable_packages.borrow();
         let mut new = FxHashMap::default();
         for package in self.derivation_tree.packages() {
             if let PubGrubPackage::Package(name, _, _) = package {
-                if let Some(entry) = unavailable_packages.get(name) {
-                    let reason = entry.value();
+                if let Some(reason) = unavailable_packages.get(name) {
                     new.insert(name.clone(), reason.clone());
                 }
             }
@@ -319,15 +321,14 @@ impl NoSolutionError {
     #[must_use]
     pub(crate) fn with_incomplete_packages(
         mut self,
-        incomplete_packages: &DashMap<PackageName, DashMap<Version, IncompletePackage>>,
+        incomplete_packages: &SharedMap<PackageName, SharedMap<Version, IncompletePackage>>,
     ) -> Self {
         let mut new = FxHashMap::default();
+        let incomplete_packages = incomplete_packages.borrow();
         for package in self.derivation_tree.packages() {
             if let PubGrubPackage::Package(name, _, _) = package {
-                if let Some(entry) = incomplete_packages.get(name) {
-                    let versions = entry.value();
-                    for entry in versions {
-                        let (version, reason) = entry.pair();
+                if let Some(versions) = incomplete_packages.get(name) {
+                    for (version, reason) in versions.borrow().iter() {
                         new.entry(name.clone())
                             .or_insert_with(BTreeMap::default)
                             .insert(version.clone(), reason.clone());
```


```diff
@@ -1,6 +1,6 @@
 use std::borrow::Cow;
 use std::hash::BuildHasherDefault;
-use std::sync::Arc;
+use std::rc::Rc;
 
 use anyhow::Result;
 use itertools::Itertools;
@@ -69,8 +69,8 @@ impl ResolutionGraph {
     pub(crate) fn from_state(
         selection: &SelectedDependencies<UvDependencyProvider>,
         pins: &FilePins,
-        packages: &OnceMap<PackageName, Arc<VersionsResponse>>,
-        distributions: &OnceMap<VersionId, Arc<MetadataResponse>>,
+        packages: &OnceMap<PackageName, Rc<VersionsResponse>>,
+        distributions: &OnceMap<VersionId, Rc<MetadataResponse>>,
         state: &State<UvDependencyProvider>,
         preferences: &Preferences,
         editables: Editables,
```


```diff
@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::rc::Rc;
 
 use distribution_types::VersionId;
 use once_map::OnceMap;
@@ -11,30 +11,30 @@ use crate::resolver::provider::{MetadataResponse, VersionsResponse};
 pub struct InMemoryIndex {
     /// A map from package name to the metadata for that package and the index where the metadata
     /// came from.
-    pub(crate) packages: OnceMap<PackageName, Arc<VersionsResponse>>,
+    pub(crate) packages: OnceMap<PackageName, Rc<VersionsResponse>>,
 
     /// A map from package ID to metadata for that distribution.
-    pub(crate) distributions: OnceMap<VersionId, Arc<MetadataResponse>>,
+    pub(crate) distributions: OnceMap<VersionId, Rc<MetadataResponse>>,
 }
 
 impl InMemoryIndex {
     /// Insert a [`VersionsResponse`] into the index.
     pub fn insert_package(&self, package_name: PackageName, response: VersionsResponse) {
-        self.packages.done(package_name, Arc::new(response));
+        self.packages.done(package_name, Rc::new(response));
     }
 
     /// Insert a [`Metadata23`] into the index.
     pub fn insert_metadata(&self, version_id: VersionId, response: MetadataResponse) {
-        self.distributions.done(version_id, Arc::new(response));
+        self.distributions.done(version_id, Rc::new(response));
     }
 
     /// Get the [`VersionsResponse`] for a given package name, without waiting.
-    pub fn get_package(&self, package_name: &PackageName) -> Option<Arc<VersionsResponse>> {
+    pub fn get_package(&self, package_name: &PackageName) -> Option<Rc<VersionsResponse>> {
         self.packages.get(package_name)
     }
 
     /// Get the [`MetadataResponse`] for a given package ID, without waiting.
-    pub fn get_metadata(&self, version_id: &VersionId) -> Option<Arc<MetadataResponse>> {
+    pub fn get_metadata(&self, version_id: &VersionId) -> Option<Rc<MetadataResponse>> {
         self.distributions.get(version_id)
     }
 }
```
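
One side benefit of the swap: cloning an `Rc` bumps a non-atomic reference count, so handing out `Rc<VersionsResponse>` handles from the index never copies the response and skips the atomic traffic that `Arc` would incur. An illustrative sketch (hypothetical stand-in payload, not the real type):

```rust
use std::rc::Rc;

struct VersionsResponse(Vec<String>); // stand-in for the real payload

fn main() {
    let response = Rc::new(VersionsResponse(vec!["1.0".into(), "2.0".into()]));
    let handle = Rc::clone(&response); // O(1): no copy, no atomic RMW
    assert_eq!(Rc::strong_count(&response), 2);
    assert_eq!(handle.0.len(), 2);
}
```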


```diff
@@ -1,12 +1,14 @@
 //! Given a set of requirements, find a set of compatible packages.
 
 use std::borrow::Cow;
+use std::cell::RefCell;
+use std::collections::{HashMap, HashSet};
 use std::fmt::{Display, Formatter};
 use std::ops::Deref;
+use std::rc::Rc;
 use std::sync::Arc;
 
 use anyhow::Result;
-use dashmap::{DashMap, DashSet};
 use futures::{FutureExt, StreamExt};
 use itertools::Itertools;
 use pubgrub::error::PubGrubError;
@@ -107,11 +109,10 @@ enum ResolverVersion {
     Unavailable(Version, UnavailableVersion),
 }
 
-pub struct Resolver<
-    'a,
-    Provider: ResolverProvider,
-    InstalledPackages: InstalledPackagesProvider + Send + Sync,
-> {
+pub(crate) type SharedMap<K, V> = Rc<RefCell<HashMap<K, V>>>;
+pub(crate) type SharedSet<K> = Rc<RefCell<HashSet<K>>>;
+
+pub struct Resolver<'a, Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
     project: Option<PackageName>,
     requirements: Vec<Requirement>,
     constraints: Constraints,
@@ -129,20 +130,17 @@ pub struct Resolver<
     index: &'a InMemoryIndex,
     installed_packages: &'a InstalledPackages,
     /// Incompatibilities for packages that are entirely unavailable.
-    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
+    unavailable_packages: SharedMap<PackageName, UnavailablePackage>,
     /// Incompatibilities for packages that are unavailable at specific versions.
-    incomplete_packages: DashMap<PackageName, DashMap<Version, IncompletePackage>>,
+    incomplete_packages: SharedMap<PackageName, SharedMap<Version, IncompletePackage>>,
     /// The set of all registry-based packages visited during resolution.
-    visited: DashSet<PackageName>,
+    visited: SharedSet<PackageName>,
     reporter: Option<Arc<dyn Reporter>>,
     provider: Provider,
 }
 
-impl<
-        'a,
-        Context: BuildContext + Send + Sync,
-        InstalledPackages: InstalledPackagesProvider + Send + Sync,
-    > Resolver<'a, DefaultResolverProvider<'a, Context>, InstalledPackages>
+impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
+    Resolver<'a, DefaultResolverProvider<'a, Context>, InstalledPackages>
 {
     /// Initialize a new resolver using the default backend doing real requests.
     ///
@@ -186,11 +184,8 @@ impl<
     }
 }
 
-impl<
-        'a,
-        Provider: ResolverProvider,
-        InstalledPackages: InstalledPackagesProvider + Send + Sync,
-    > Resolver<'a, Provider, InstalledPackages>
+impl<'a, Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
+    Resolver<'a, Provider, InstalledPackages>
 {
     /// Initialize a new resolver using a user provided backend.
     #[allow(clippy::too_many_arguments)]
@@ -206,9 +201,9 @@ impl<
     ) -> Result<Self, ResolveError> {
         Ok(Self {
             index,
-            unavailable_packages: DashMap::default(),
-            incomplete_packages: DashMap::default(),
-            visited: DashSet::default(),
+            unavailable_packages: SharedMap::default(),
+            incomplete_packages: SharedMap::default(),
+            visited: SharedSet::default(),
             selector: CandidateSelector::for_resolution(options, &manifest, markers),
             dependency_mode: options.dependency_mode,
             urls: Urls::from_manifest(&manifest, markers, options.dependency_mode)?,
@@ -251,7 +246,7 @@ impl<
         let requests_fut = self.fetch(request_stream).fuse();
 
         // Run the solver.
-        let resolve_fut = self.solve(request_sink).boxed().fuse();
+        let resolve_fut = self.solve(request_sink).boxed_local().fuse();
 
         // Wait for both to complete.
         match tokio::try_join!(requests_fut, resolve_fut) {
@@ -368,6 +363,7 @@ impl<
             if let PubGrubPackage::Package(ref package_name, _, _) = next {
                 // Check if the decision was due to the package being unavailable
                 self.unavailable_packages
+                    .borrow()
                     .get(package_name)
                     .map(|entry| match *entry {
                         UnavailablePackage::NoIndex => {
@@ -648,25 +644,26 @@ impl<
             MetadataResponse::Found(archive) => &archive.metadata,
             MetadataResponse::Offline => {
                 self.unavailable_packages
+                    .borrow_mut()
                     .insert(package_name.clone(), UnavailablePackage::Offline);
                 return Ok(None);
             }
             MetadataResponse::InvalidMetadata(err) => {
-                self.unavailable_packages.insert(
+                self.unavailable_packages.borrow_mut().insert(
                     package_name.clone(),
                     UnavailablePackage::InvalidMetadata(err.to_string()),
                 );
                 return Ok(None);
             }
             MetadataResponse::InconsistentMetadata(err) => {
-                self.unavailable_packages.insert(
+                self.unavailable_packages.borrow_mut().insert(
                     package_name.clone(),
                     UnavailablePackage::InvalidMetadata(err.to_string()),
                 );
                 return Ok(None);
             }
             MetadataResponse::InvalidStructure(err) => {
-                self.unavailable_packages.insert(
+                self.unavailable_packages.borrow_mut().insert(
                     package_name.clone(),
                     UnavailablePackage::InvalidStructure(err.to_string()),
                 );
@@ -707,22 +704,25 @@ impl<
             .instrument(info_span!("package_wait", %package_name))
             .await
             .ok_or(ResolveError::Unregistered)?;
-        self.visited.insert(package_name.clone());
+        self.visited.borrow_mut().insert(package_name.clone());
 
         let version_maps = match *versions_response {
             VersionsResponse::Found(ref version_maps) => version_maps.as_slice(),
             VersionsResponse::NoIndex => {
                 self.unavailable_packages
+                    .borrow_mut()
                     .insert(package_name.clone(), UnavailablePackage::NoIndex);
                 &[]
             }
             VersionsResponse::Offline => {
                 self.unavailable_packages
+                    .borrow_mut()
                     .insert(package_name.clone(), UnavailablePackage::Offline);
                 &[]
             }
             VersionsResponse::NotFound => {
                 self.unavailable_packages
+                    .borrow_mut()
                     .insert(package_name.clone(), UnavailablePackage::NotFound);
                 &[]
             }
@@ -925,7 +925,11 @@ impl<
         let version_id = dist.version_id();
 
         // If the package does not exist in the registry or locally, we cannot fetch its dependencies
-        if self.unavailable_packages.get(package_name).is_some()
+        if self
+            .unavailable_packages
+            .borrow()
+            .get(package_name)
+            .is_some()
             && self
                 .installed_packages
                 .get_packages(package_name)
@@ -953,8 +957,10 @@ impl<
             MetadataResponse::Found(archive) => &archive.metadata,
             MetadataResponse::Offline => {
                 self.incomplete_packages
+                    .borrow_mut()
                     .entry(package_name.clone())
                     .or_default()
+                    .borrow_mut()
                     .insert(version.clone(), IncompletePackage::Offline);
                 return Ok(Dependencies::Unavailable(
                     "network connectivity is disabled, but the metadata wasn't found in the cache"
@@ -964,8 +970,10 @@ impl<
             MetadataResponse::InvalidMetadata(err) => {
                 warn!("Unable to extract metadata for {package_name}: {err}");
                 self.incomplete_packages
+                    .borrow_mut()
                     .entry(package_name.clone())
                     .or_default()
+                    .borrow_mut()
                     .insert(
                         version.clone(),
                         IncompletePackage::InvalidMetadata(err.to_string()),
@@ -977,8 +985,10 @@ impl<
             MetadataResponse::InconsistentMetadata(err) => {
                 warn!("Unable to extract metadata for {package_name}: {err}");
                 self.incomplete_packages
+                    .borrow_mut()
                     .entry(package_name.clone())
                     .or_default()
+                    .borrow_mut()
                     .insert(
                         version.clone(),
                         IncompletePackage::InconsistentMetadata(err.to_string()),
@@ -990,8 +1000,10 @@ impl<
             MetadataResponse::InvalidStructure(err) => {
                 warn!("Unable to extract metadata for {package_name}: {err}");
                 self.incomplete_packages
+                    .borrow_mut()
                     .entry(package_name.clone())
                     .or_default()
+                    .borrow_mut()
                     .insert(
                         version.clone(),
                         IncompletePackage::InvalidStructure(err.to_string()),
@@ -1052,22 +1064,20 @@ impl<
         request_stream: tokio::sync::mpsc::Receiver<Request>,
     ) -> Result<(), ResolveError> {
         let mut response_stream = ReceiverStream::new(request_stream)
-            .map(|request| self.process_request(request).boxed())
+            .map(|request| self.process_request(request).boxed_local())
             .buffer_unordered(50);
 
         while let Some(response) = response_stream.next().await {
             match response? {
                 Some(Response::Package(package_name, version_map)) => {
                     trace!("Received package metadata for: {package_name}");
-                    self.index
-                        .packages
-                        .done(package_name, Arc::new(version_map));
+                    self.index.packages.done(package_name, Rc::new(version_map));
                 }
                 Some(Response::Installed { dist, metadata }) => {
                     trace!("Received installed distribution metadata for: {dist}");
                     self.index.distributions.done(
                         dist.version_id(),
-                        Arc::new(MetadataResponse::Found(ArchiveMetadata::from(metadata))),
+                        Rc::new(MetadataResponse::Found(ArchiveMetadata::from(metadata))),
                     );
                 }
                 Some(Response::Dist {
@@ -1086,7 +1096,7 @@ impl<
                     }
                     self.index
                         .distributions
-                        .done(dist.version_id(), Arc::new(metadata));
+                        .done(dist.version_id(), Rc::new(metadata));
                 }
                 Some(Response::Dist {
                     dist: Dist::Source(dist),
@@ -1104,7 +1114,7 @@ impl<
                     }
                     self.index
                         .distributions
-                        .done(dist.version_id(), Arc::new(metadata));
+                        .done(dist.version_id(), Rc::new(metadata));
                 }
                 None => {}
             }
@@ -1121,7 +1131,7 @@ impl<
                 let package_versions = self
                     .provider
                     .get_package_versions(&package_name)
-                    .boxed()
+                    .boxed_local()
                     .await
                     .map_err(ResolveError::Client)?;
 
@@ -1133,7 +1143,7 @@ impl<
                 let metadata = self
                     .provider
                     .get_or_build_wheel_metadata(&dist)
-                    .boxed()
+                    .boxed_local()
                     .await
                     .map_err(|err| match dist.clone() {
                         Dist::Built(BuiltDist::Path(built_dist)) => {
@@ -1172,18 +1182,21 @@ impl<
             // Short-circuit if we did not find any versions for the package
             VersionsResponse::NoIndex => {
                 self.unavailable_packages
+                    .borrow_mut()
                     .insert(package_name.clone(), UnavailablePackage::NoIndex);
                 return Ok(None);
             }
             VersionsResponse::Offline => {
                 self.unavailable_packages
+                    .borrow_mut()
                     .insert(package_name.clone(), UnavailablePackage::Offline);
                 return Ok(None);
             }
             VersionsResponse::NotFound => {
                 self.unavailable_packages
+                    .borrow_mut()
                     .insert(package_name.clone(), UnavailablePackage::NotFound);
                 return Ok(None);
             }
@@ -1217,7 +1230,7 @@ impl<
                 let metadata = self
                     .provider
                     .get_or_build_wheel_metadata(&dist)
-                    .boxed()
+                    .boxed_local()
                     .await
                     .map_err(|err| match dist.clone() {
                         Dist::Built(BuiltDist::Path(built_dist)) => {
```
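
The `SharedMap`/`SharedSet` aliases introduced above replace `DashMap`/`DashSet` with `Rc<RefCell<...>>`. A standalone sketch of the borrow discipline this entails (illustrative `String` keys rather than `PackageName`):

```rust
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::rc::Rc;

type SharedMap<K, V> = Rc<RefCell<HashMap<K, V>>>;
type SharedSet<K> = Rc<RefCell<HashSet<K>>>;

fn main() {
    let visited: SharedSet<String> = SharedSet::default();
    visited.borrow_mut().insert("numpy".to_string());

    let unavailable: SharedMap<String, String> = SharedMap::default();
    unavailable
        .borrow_mut()
        .insert("left-pad".to_string(), "not found".to_string());

    // Reads take a shared borrow. There is no lock; the one rule is to never
    // hold a `borrow()` across a `borrow_mut()` of the same cell.
    assert!(visited.borrow().contains("numpy"));
    assert_eq!(
        unavailable.borrow().get("left-pad").map(String::as_str),
        Some("not found")
    );
}
```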


```diff
@@ -46,12 +46,12 @@ pub enum MetadataResponse {
     Offline,
 }
 
-pub trait ResolverProvider: Send + Sync {
+pub trait ResolverProvider {
     /// Get the version map for a package.
     fn get_package_versions<'io>(
         &'io self,
         package_name: &'io PackageName,
-    ) -> impl Future<Output = PackageVersionsResult> + Send + 'io;
+    ) -> impl Future<Output = PackageVersionsResult> + 'io;
 
     /// Get the metadata for a distribution.
     ///
@@ -61,7 +61,7 @@ pub trait ResolverProvider: Send + Sync {
     fn get_or_build_wheel_metadata<'io>(
         &'io self,
         dist: &'io Dist,
-    ) -> impl Future<Output = WheelMetadataResult> + Send + 'io;
+    ) -> impl Future<Output = WheelMetadataResult> + 'io;
 
     fn index_locations(&self) -> &IndexLocations;
@@ -72,7 +72,7 @@ pub trait ResolverProvider: Send + Sync {
 /// The main IO backend for the resolver, which does cached requests network requests using the
 /// [`RegistryClient`] and [`DistributionDatabase`].
-pub struct DefaultResolverProvider<'a, Context: BuildContext + Send + Sync> {
+pub struct DefaultResolverProvider<'a, Context: BuildContext> {
     /// The [`DistributionDatabase`] used to build source distributions.
     fetcher: DistributionDatabase<'a, Context>,
     /// The [`RegistryClient`] used to query the index.
@@ -88,7 +88,7 @@ pub struct DefaultResolverProvider<'a, Context: BuildContext + Send + Sync> {
     no_build: NoBuild,
 }
 
-impl<'a, Context: BuildContext + Send + Sync> DefaultResolverProvider<'a, Context> {
+impl<'a, Context: BuildContext> DefaultResolverProvider<'a, Context> {
     /// Reads the flat index entries and builds the provider.
     #[allow(clippy::too_many_arguments)]
     pub fn new(
@@ -118,9 +118,7 @@ impl<'a, Context: BuildContext + Send + Sync> DefaultResolverProvider<'a, Contex
     }
 }
 
-impl<'a, Context: BuildContext + Send + Sync> ResolverProvider
-    for DefaultResolverProvider<'a, Context>
-{
+impl<'a, Context: BuildContext> ResolverProvider for DefaultResolverProvider<'a, Context> {
     /// Make a "Simple API" request for the package and convert the result to a [`VersionMap`].
     async fn get_package_versions<'io>(
         &'io self,
```


```diff
@@ -48,8 +48,8 @@ use crate::BuildIsolation;
 /// Put in a different way, the types here allow `uv-resolver` to depend on `uv-build` and
 /// `uv-build` to depend on `uv-resolver` which having actual crate dependencies between
 /// them.
-pub trait BuildContext: Sync {
-    type SourceDistBuilder: SourceBuildTrait + Send + Sync;
+pub trait BuildContext {
+    type SourceDistBuilder: SourceBuildTrait;
 
     /// Return a reference to the cache.
     fn cache(&self) -> &Cache;
@@ -79,7 +79,7 @@ pub trait BuildContext: Sync {
     fn resolve<'a>(
         &'a self,
         requirements: &'a [Requirement],
-    ) -> impl Future<Output = Result<Resolution>> + Send + 'a;
+    ) -> impl Future<Output = Result<Resolution>> + 'a;
 
     /// Install the given set of package versions into the virtual environment. The environment must
     /// use the same base Python as [`BuildContext::interpreter`]
@@ -87,7 +87,7 @@ pub trait BuildContext: Sync {
         &'a self,
         resolution: &'a Resolution,
         venv: &'a PythonEnvironment,
-    ) -> impl Future<Output = Result<()>> + Send + 'a;
+    ) -> impl Future<Output = Result<()>> + 'a;
 
     /// Setup a source distribution build by installing the required dependencies. A wrapper for
     /// `uv_build::SourceBuild::setup`.
@@ -103,7 +103,7 @@ pub trait BuildContext: Sync {
         version_id: &'a str,
         dist: Option<&'a SourceDist>,
         build_kind: BuildKind,
-    ) -> impl Future<Output = Result<Self::SourceDistBuilder>> + Send + 'a;
+    ) -> impl Future<Output = Result<Self::SourceDistBuilder>> + 'a;
 }
 
 /// A wrapper for `uv_build::SourceBuild` to avoid cyclical crate dependencies.
@@ -117,15 +117,14 @@ pub trait SourceBuildTrait {
     ///
     /// Returns the metadata directory if we're having a PEP 517 build and the
     /// `prepare_metadata_for_build_wheel` hook exists
-    fn metadata(&mut self) -> impl Future<Output = Result<Option<PathBuf>>> + Send;
+    fn metadata(&mut self) -> impl Future<Output = Result<Option<PathBuf>>>;
 
     /// A wrapper for `uv_build::SourceBuild::build`.
     ///
     /// For PEP 517 builds, this calls `build_wheel`.
     ///
     /// Returns the filename of the built wheel inside the given `wheel_dir`.
-    fn wheel<'a>(&'a self, wheel_dir: &'a Path)
-        -> impl Future<Output = Result<String>> + Send + 'a;
+    fn wheel<'a>(&'a self, wheel_dir: &'a Path) -> impl Future<Output = Result<String>> + 'a;
 }
 
 /// A wrapper for [`uv_installer::SitePackages`]
```