add support for specifying conflicting extras (#8976)

This PR adds support for conflicting extras. For example, consider
some optional dependencies like this:

```toml
[project.optional-dependencies]
project1 = ["numpy==1.26.3"]
project2 = ["numpy==1.26.4"]
```

These dependency specifications are not compatible with one another.
And if you ask uv to lock these, you'll get an unresolvable error.

With this PR, you can now add this to your `pyproject.toml` to get
around this:

```toml
[tool.uv]
conflicting-groups = [
    [
      { package = "project", extra = "project1" },
      { package = "project", extra = "project2" },
    ],
]
```

This will make the universal resolver create additional forks
internally that keep the dependencies from the `project1` and
`project2` extras separate. And we make all of this work by reporting
an error at **install** time if one tries to install with two or more
extras that have been declared as conflicting. (If we didn't do this,
it would be possible to try and install two different versions of the
same package into the same environment.)

This PR does *not* add support for conflicting **groups**; support for
groups is intended to be added in a follow-up PR.

Closes #6981

Fixes #8024

Ref #6729, Ref #6830

This should also hopefully unblock
https://github.com/dagster-io/dagster/pull/23814, but in my testing, I
did run into other problems (specifically, with `pywin`). But it does
resolve the problem with incompatible dependencies in two different
extras once you declare `test-airflow-1` and `test-airflow-2` as
conflicting for `dagster-airflow`.

NOTE: This PR doesn't make `conflicting-groups` public yet. And in a
follow-up PR, I plan to switch the name to `conflicts` instead of
`conflicting-groups`, since it will be able to accept conflicting extras
_and_ conflicting groups.
This commit is contained in:
Andrew Gallant 2024-11-13 09:52:28 -05:00 committed by GitHub
parent 926660aea0
commit 15ef807c80
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 2393 additions and 171 deletions

1
Cargo.lock generated
View file

@ -5195,6 +5195,7 @@ dependencies = [
"mailparse",
"regex",
"rkyv",
"schemars",
"serde",
"serde-untagged",
"thiserror",

View file

@ -97,7 +97,7 @@ mod resolver {
use uv_pep440::Version;
use uv_pep508::{MarkerEnvironment, MarkerEnvironmentBuilder};
use uv_platform_tags::{Arch, Os, Platform, Tags};
use uv_pypi_types::ResolverMarkerEnvironment;
use uv_pypi_types::{ConflictingGroupList, ResolverMarkerEnvironment};
use uv_python::Interpreter;
use uv_resolver::{
FlatIndex, InMemoryIndex, Manifest, OptionsBuilder, PythonRequirement, RequiresPython,
@ -163,6 +163,7 @@ mod resolver {
let options = OptionsBuilder::new().exclude_newer(exclude_newer).build();
let sources = SourceStrategy::default();
let dependency_metadata = DependencyMetadata::default();
let conflicting_groups = ConflictingGroupList::empty();
let python_requirement = if universal {
PythonRequirement::from_requires_python(
@ -208,6 +209,7 @@ mod resolver {
options,
&python_requirement,
markers,
conflicting_groups,
Some(&TAGS),
&flat_index,
&index,

View file

@ -26,7 +26,7 @@ use uv_distribution_types::{
};
use uv_git::GitResolver;
use uv_installer::{Installer, Plan, Planner, Preparer, SitePackages};
use uv_pypi_types::Requirement;
use uv_pypi_types::{ConflictingGroupList, Requirement};
use uv_python::{Interpreter, PythonEnvironment};
use uv_resolver::{
ExcludeNewer, FlatIndex, Flexibility, InMemoryIndex, Manifest, OptionsBuilder,
@ -186,6 +186,9 @@ impl<'a> BuildContext for BuildDispatch<'a> {
.build(),
&python_requirement,
ResolverEnvironment::specific(marker_env),
// Conflicting groups only make sense when doing
// universal resolution.
ConflictingGroupList::empty(),
Some(tags),
self.flat_index,
self.index,

View file

@ -19,7 +19,7 @@ workspace = true
uv-distribution-filename = { workspace = true }
uv-fs = { workspace = true, features = ["serde"] }
uv-git = { workspace = true }
uv-normalize = { workspace = true }
uv-normalize = { workspace = true, features = ["schemars"] }
uv-pep440 = { workspace = true }
uv-pep508 = { workspace = true }
@ -29,6 +29,7 @@ jiff = { workspace = true, features = ["serde"] }
mailparse = { workspace = true }
regex = { workspace = true }
rkyv = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true }
serde-untagged = { workspace = true }
thiserror = { workspace = true }

View file

@ -0,0 +1,301 @@
use uv_normalize::{ExtraName, PackageName};
/// A list of conflicting groups pre-defined by an end user.
///
/// This is useful to force the resolver to fork according to extras that have
/// unavoidable conflicts with each other. (The alternative is that resolution
/// will fail.)
///
/// An empty list (which is what the derived `Default` produces) has no
/// effect on resolution.
#[derive(
    Debug, Default, Clone, Eq, PartialEq, serde::Deserialize, serde::Serialize, schemars::JsonSchema,
)]
pub struct ConflictingGroupList(Vec<ConflictingGroups>);
impl ConflictingGroupList {
    /// Returns an empty list of conflicting groups.
    ///
    /// An empty list has no effect on resolution.
    pub fn empty() -> ConflictingGroupList {
        ConflictingGroupList::default()
    }

    /// Adds one set of conflicting groups to the end of this list.
    pub fn push(&mut self, groups: ConflictingGroups) {
        self.0.push(groups);
    }

    /// Returns an iterator over every set of conflicting groups in this list.
    pub fn iter(&self) -> impl Iterator<Item = &'_ ConflictingGroups> + '_ {
        self.0.iter()
    }

    /// Returns true if any set in this list contains the given
    /// package/extra name pair.
    pub fn contains(&self, package: &PackageName, extra: &ExtraName) -> bool {
        self.0.iter().any(|set| set.contains(package, extra))
    }

    /// Returns true when this list contains no sets at all.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Moves every set out of `other` and onto the end of this list.
    ///
    /// After this call, `other` is empty.
    pub fn append(&mut self, other: &mut ConflictingGroupList) {
        self.0.append(&mut other.0);
    }
}
/// A single set of package-extra pairs that conflict with one another.
///
/// Within each set of conflicting groups, the resolver should isolate
/// the requirements corresponding to each extra from the requirements of
/// other extras in this set. That is, the resolver should put each set of
/// requirements in a different fork.
///
/// A `TryFrom<Vec<ConflictingGroup>>` impl may be used to build a set
/// from a sequence. Note though that at least 2 groups are required.
///
/// NOTE(review): the derived `Default` produces an *empty* set, which
/// violates the "at least 2 groups" invariant that `TryFrom` and
/// `Deserialize` enforce — confirm no caller constructs this via `Default`.
#[derive(Debug, Default, Clone, Eq, PartialEq, serde::Serialize, schemars::JsonSchema)]
pub struct ConflictingGroups(Vec<ConflictingGroup>);
impl ConflictingGroups {
    /// Builds a conflict set from exactly two groups.
    pub fn pair(group1: ConflictingGroup, group2: ConflictingGroup) -> ConflictingGroups {
        ConflictingGroups(vec![group1, group2])
    }

    /// Appends one more conflicting group to this set.
    pub fn push(&mut self, group: ConflictingGroup) {
        self.0.push(group);
    }

    /// Returns an iterator over each conflicting group in this set.
    pub fn iter(&self) -> impl Iterator<Item = &'_ ConflictingGroup> + '_ {
        self.0.iter()
    }

    /// Returns true if the given package/extra name pair appears in
    /// this set.
    pub fn contains(&self, package: &PackageName, extra: &ExtraName) -> bool {
        self.0
            .iter()
            .any(|g| g.package() == package && g.extra() == extra)
    }
}
impl<'de> serde::Deserialize<'de> for ConflictingGroups {
fn deserialize<D>(deserializer: D) -> Result<ConflictingGroups, D::Error>
where
D: serde::Deserializer<'de>,
{
let groups = Vec::<ConflictingGroup>::deserialize(deserializer)?;
Self::try_from(groups).map_err(serde::de::Error::custom)
}
}
impl TryFrom<Vec<ConflictingGroup>> for ConflictingGroups {
    type Error = ConflictingGroupError;

    /// Builds a conflict set, rejecting vectors with fewer than two groups.
    fn try_from(groups: Vec<ConflictingGroup>) -> Result<ConflictingGroups, ConflictingGroupError> {
        match groups.len() {
            0 => Err(ConflictingGroupError::ZeroGroups),
            1 => Err(ConflictingGroupError::OneGroup),
            _ => Ok(ConflictingGroups(groups)),
        }
    }
}
/// A single item in a set of conflicting groups.
///
/// Each item is a pair of a package and a corresponding extra name for that
/// package.
#[derive(
    Debug,
    Default,
    Clone,
    Eq,
    Hash,
    PartialEq,
    PartialOrd,
    Ord,
    serde::Deserialize,
    serde::Serialize,
    schemars::JsonSchema,
)]
pub struct ConflictingGroup {
    // The package whose extra participates in the conflict.
    package: PackageName,
    // The extra that conflicts with the other entries in the same set.
    extra: ExtraName,
}
impl ConflictingGroup {
/// Returns the package name of this conflicting group.
pub fn package(&self) -> &PackageName {
&self.package
}
/// Returns the extra name of this conflicting group.
pub fn extra(&self) -> &ExtraName {
&self.extra
}
/// Returns this group as a new type with its fields borrowed.
pub fn as_ref(&self) -> ConflictingGroupRef<'_> {
ConflictingGroupRef {
package: self.package(),
extra: self.extra(),
}
}
}
impl From<(PackageName, ExtraName)> for ConflictingGroup {
    /// Builds a conflicting group directly from a `(package, extra)` pair.
    fn from((package, extra): (PackageName, ExtraName)) -> ConflictingGroup {
        ConflictingGroup { package, extra }
    }
}
/// A single item in a set of conflicting groups, by reference.
///
/// Each item is a pair of a package and a corresponding extra name for that
/// package. See [`ConflictingGroup`] for the owned form.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct ConflictingGroupRef<'a> {
    package: &'a PackageName,
    extra: &'a ExtraName,
}
impl<'a> ConflictingGroupRef<'a> {
/// Returns the package name of this conflicting group.
pub fn package(&self) -> &'a PackageName {
self.package
}
/// Returns the extra name of this conflicting group.
pub fn extra(&self) -> &'a ExtraName {
self.extra
}
/// Converts this borrowed conflicting group to its owned variant.
pub fn to_owned(&self) -> ConflictingGroup {
ConflictingGroup {
package: self.package().clone(),
extra: self.extra().clone(),
}
}
}
impl<'a> From<(&'a PackageName, &'a ExtraName)> for ConflictingGroupRef<'a> {
    /// Builds a borrowed conflicting group from a `(package, extra)` pair.
    fn from((package, extra): (&'a PackageName, &'a ExtraName)) -> ConflictingGroupRef<'a> {
        ConflictingGroupRef { package, extra }
    }
}
/// An error that occurs when the given conflicting groups are invalid somehow.
///
/// Returned by the `TryFrom` conversions for [`ConflictingGroups`] and
/// [`SchemaConflictingGroups`] when a set has fewer than two entries.
#[derive(Debug, thiserror::Error)]
pub enum ConflictingGroupError {
    /// An error for when there are zero conflicting groups.
    #[error("Each set of conflicting groups must have at least two entries, but found none")]
    ZeroGroups,
    /// An error for when there is one conflicting group.
    #[error("Each set of conflicting groups must have at least two entries, but found only one")]
    OneGroup,
}
/// Like [`ConflictingGroupList`], but for deserialization in `pyproject.toml`.
///
/// The schema format is different from the in-memory format. Specifically, the
/// schema format does not allow specifying the package name (or will make it
/// optional in the future), whereas the in-memory format needs the package
/// name.
///
/// N.B. `ConflictingGroupList` is still used for (de)serialization.
/// Specifically, in the lock file, where the package name is required.
#[derive(
    Debug, Default, Clone, Eq, PartialEq, serde::Deserialize, serde::Serialize, schemars::JsonSchema,
)]
pub struct SchemaConflictingGroupList(Vec<SchemaConflictingGroups>);
impl SchemaConflictingGroupList {
/// Convert the public schema "conflicting" type to our internal fully
/// resolved type. Effectively, this pairs the corresponding package name
/// with each conflict.
///
/// If a conflict has an explicit package name (written by the end user),
/// then that takes precedence over the given package name, which is only
/// used when there is no explicit package name written.
pub fn to_conflicting_with_package_name(&self, package: &PackageName) -> ConflictingGroupList {
let mut conflicting = ConflictingGroupList::empty();
for tool_uv_set in &self.0 {
let mut set = vec![];
for item in &tool_uv_set.0 {
let package = item.package.clone().unwrap_or_else(|| package.clone());
set.push(ConflictingGroup::from((package, item.extra.clone())));
}
// OK because we guarantee that
// `SchemaConflictingGroupList` is valid and there aren't
// any new errors that can occur here.
let set = ConflictingGroups::try_from(set).unwrap();
conflicting.push(set);
}
conflicting
}
}
/// Like [`ConflictingGroups`], but for deserialization in `pyproject.toml`.
///
/// The schema format is different from the in-memory format. Specifically, the
/// schema format does not allow specifying the package name (or will make it
/// optional in the future), whereas the in-memory format needs the package
/// name.
#[derive(Debug, Default, Clone, Eq, PartialEq, serde::Serialize, schemars::JsonSchema)]
pub struct SchemaConflictingGroups(Vec<SchemaConflictingGroup>);
/// Like [`ConflictingGroup`], but for deserialization in `pyproject.toml`.
///
/// The schema format is different from the in-memory format. Specifically, the
/// schema format does not allow specifying the package name (or will make it
/// optional in the future), whereas the in-memory format needs the package
/// name.
#[derive(
    Debug,
    Default,
    Clone,
    Eq,
    Hash,
    PartialEq,
    PartialOrd,
    Ord,
    serde::Deserialize,
    serde::Serialize,
    schemars::JsonSchema,
)]
#[serde(deny_unknown_fields)]
pub struct SchemaConflictingGroup {
    // Optional in the schema: when absent, the caller-supplied package name
    // is filled in by `to_conflicting_with_package_name`.
    #[serde(default)]
    package: Option<PackageName>,
    extra: ExtraName,
}
impl<'de> serde::Deserialize<'de> for SchemaConflictingGroups {
fn deserialize<D>(deserializer: D) -> Result<SchemaConflictingGroups, D::Error>
where
D: serde::Deserializer<'de>,
{
let items = Vec::<SchemaConflictingGroup>::deserialize(deserializer)?;
Self::try_from(items).map_err(serde::de::Error::custom)
}
}
impl TryFrom<Vec<SchemaConflictingGroup>> for SchemaConflictingGroups {
    type Error = ConflictingGroupError;

    /// Builds a schema conflict set, rejecting fewer than two entries.
    fn try_from(
        items: Vec<SchemaConflictingGroup>,
    ) -> Result<SchemaConflictingGroups, ConflictingGroupError> {
        match items.len() {
            0 => Err(ConflictingGroupError::ZeroGroups),
            1 => Err(ConflictingGroupError::OneGroup),
            _ => Ok(SchemaConflictingGroups(items)),
        }
    }
}

View file

@ -1,4 +1,5 @@
pub use base_url::*;
pub use conflicting_groups::*;
pub use direct_url::*;
pub use lenient_requirement::*;
pub use marker_environment::*;
@ -10,6 +11,7 @@ pub use simple_json::*;
pub use supported_environments::*;
mod base_url;
mod conflicting_groups;
mod direct_url;
mod lenient_requirement;
mod marker_environment;

View file

@ -4,7 +4,7 @@ use uv_pep508::MarkerEnvironment;
/// A wrapper type around [`MarkerEnvironment`] that ensures the Python version markers are
/// release-only, to match the resolver's semantics.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ResolverMarkerEnvironment(MarkerEnvironment);
impl ResolverMarkerEnvironment {

View file

@ -137,10 +137,14 @@ impl CandidateSelector {
// first has the matching half and then the mismatching half.
let preferences_match = preferences.get(package_name).filter(|(marker, _version)| {
// `.unwrap_or(true)` because the universal marker is considered matching.
marker.map(|marker| env.included(marker)).unwrap_or(true)
marker
.map(|marker| env.included_by_marker(marker))
.unwrap_or(true)
});
let preferences_mismatch = preferences.get(package_name).filter(|(marker, _version)| {
marker.map(|marker| !env.included(marker)).unwrap_or(false)
marker
.map(|marker| !env.included_by_marker(marker))
.unwrap_or(false)
});
self.get_preferred_from_iter(
preferences_match.chain(preferences_mismatch),

View file

@ -12,7 +12,7 @@ use tracing::trace;
use uv_distribution_types::{
BuiltDist, IndexCapabilities, IndexLocations, IndexUrl, InstalledDist, SourceDist,
};
use uv_normalize::PackageName;
use uv_normalize::{ExtraName, PackageName};
use uv_pep440::{LocalVersionSlice, Version};
use uv_static::EnvVars;
@ -41,6 +41,13 @@ pub enum ResolveError {
#[error("Attempted to wait on an unregistered task: `{_0}`")]
UnregisteredTask(String),
#[error("Found conflicting extra `{extra}` unconditionally enabled in `{requirement}`")]
ConflictingExtra {
// Boxed because `Requirement` is large.
requirement: Box<uv_pypi_types::Requirement>,
extra: ExtraName,
},
#[error("Overrides contain conflicting URLs for package `{0}`:\n- {1}\n- {2}")]
ConflictingOverrideUrls(PackageName, String, String),

View file

@ -40,7 +40,8 @@ use uv_pep440::Version;
use uv_pep508::{split_scheme, MarkerEnvironment, MarkerTree, VerbatimUrl, VerbatimUrlError};
use uv_platform_tags::{TagCompatibility, TagPriority, Tags};
use uv_pypi_types::{
redact_credentials, HashDigest, ParsedArchiveUrl, ParsedGitUrl, Requirement, RequirementSource,
redact_credentials, ConflictingGroupList, HashDigest, ParsedArchiveUrl, ParsedGitUrl,
Requirement, RequirementSource,
};
use uv_types::{BuildContext, HashStrategy};
use uv_workspace::dependency_groups::DependencyGroupError;
@ -80,6 +81,8 @@ pub struct Lock {
/// If this lockfile was built from a forking resolution with non-identical forks, store the
/// forks in the lockfile so we can recreate them in subsequent resolutions.
fork_markers: Vec<MarkerTree>,
/// The conflicting groups/extras specified by the user.
conflicting_groups: ConflictingGroupList,
/// The list of supported environments specified by the user.
supported_environments: Vec<MarkerTree>,
/// The range of supported Python versions.
@ -236,6 +239,7 @@ impl Lock {
requires_python,
options,
ResolverManifest::default(),
ConflictingGroupList::empty(),
vec![],
graph.fork_markers.clone(),
)?;
@ -311,6 +315,7 @@ impl Lock {
requires_python: RequiresPython,
options: ResolverOptions,
manifest: ResolverManifest,
conflicting_groups: ConflictingGroupList,
supported_environments: Vec<MarkerTree>,
fork_markers: Vec<MarkerTree>,
) -> Result<Self, LockError> {
@ -460,6 +465,7 @@ impl Lock {
let lock = Self {
version,
fork_markers,
conflicting_groups,
supported_environments,
requires_python,
options,
@ -477,6 +483,13 @@ impl Lock {
self
}
/// Record the conflicting groups that were used to generate this lock.
#[must_use]
pub fn with_conflicting_groups(mut self, conflicting_groups: ConflictingGroupList) -> Self {
self.conflicting_groups = conflicting_groups;
self
}
/// Record the supported environments that were used to generate this lock.
#[must_use]
pub fn with_supported_environments(mut self, supported_environments: Vec<MarkerTree>) -> Self {
@ -536,6 +549,11 @@ impl Lock {
self.options.exclude_newer
}
/// Returns the conflicting groups that were used to generate this lock.
pub fn conflicting_groups(&self) -> &ConflictingGroupList {
&self.conflicting_groups
}
/// Returns the supported environments that were used to generate this lock.
pub fn supported_environments(&self) -> &[MarkerTree] {
&self.supported_environments
@ -614,6 +632,19 @@ impl Lock {
doc.insert("supported-markers", value(supported_environments));
}
if !self.conflicting_groups.is_empty() {
let mut list = Array::new();
for groups in self.conflicting_groups.iter() {
list.push(each_element_on_its_line_array(groups.iter().map(|group| {
let mut table = InlineTable::new();
table.insert("package", Value::from(group.package().to_string()));
table.insert("extra", Value::from(group.extra().to_string()));
table
})));
}
doc.insert("conflicting-groups", value(list));
}
// Write the settings that were used to generate the resolution.
// This enables us to invalidate the lockfile if the user changes
// their settings.
@ -1352,6 +1383,8 @@ struct LockWire {
fork_markers: Vec<SimplifiedMarkerTree>,
#[serde(rename = "supported-markers", default)]
supported_environments: Vec<SimplifiedMarkerTree>,
#[serde(rename = "conflicting-groups", default)]
conflicting_groups: Option<ConflictingGroupList>,
/// We discard the lockfile if these options match.
#[serde(default)]
options: ResolverOptions,
@ -1403,6 +1436,8 @@ impl TryFrom<LockWire> for Lock {
wire.requires_python,
wire.options,
wire.manifest,
wire.conflicting_groups
.unwrap_or_else(ConflictingGroupList::empty),
supported_environments,
fork_markers,
)?;

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -6,6 +6,9 @@ Ok(
Lock {
version: 1,
fork_markers: [],
conflicting_groups: ConflictingGroupList(
[],
),
supported_environments: [],
requires_python: RequiresPython {
specifiers: VersionSpecifiers(

View file

@ -3,6 +3,7 @@ use std::sync::Arc;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep508::{MarkerTree, MarkerTreeContents};
use uv_pypi_types::ConflictingGroupRef;
use crate::python_requirement::PythonRequirement;
@ -166,6 +167,35 @@ impl PubGrubPackage {
}
}
/// Returns the extra name associated with this PubGrub package, if it has
/// one.
pub(crate) fn extra(&self) -> Option<&ExtraName> {
match &**self {
// A root can never be a dependency of another package, and a `Python` pubgrub
// package is never returned by `get_dependencies`. So these cases never occur.
PubGrubPackageInner::Root(_)
| PubGrubPackageInner::Python(_)
| PubGrubPackageInner::Package { extra: None, .. }
| PubGrubPackageInner::Dev { .. }
| PubGrubPackageInner::Marker { .. } => None,
PubGrubPackageInner::Package {
extra: Some(ref extra),
..
}
| PubGrubPackageInner::Extra { ref extra, .. } => Some(extra),
}
}
/// Extracts a possible conflicting group from this package.
///
/// If this package can't possibly be classified as a conflicting group,
/// then this returns `None`.
pub(crate) fn conflicting_group(&self) -> Option<ConflictingGroupRef<'_>> {
let package = self.name_no_root()?;
let extra = self.extra()?;
Some(ConflictingGroupRef::from((package, extra)))
}
/// Returns `true` if this PubGrub package is a proxy package.
pub(crate) fn is_proxy(&self) -> bool {
matches!(
@ -205,6 +235,7 @@ impl PubGrubPackage {
}
}
/// This isn't actually used anywhere, but can be useful for printf-debugging.
#[allow(dead_code)]
pub(crate) fn kind(&self) -> &'static str {
match &**self {

View file

@ -17,7 +17,9 @@ use uv_git::GitResolver;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::{Version, VersionSpecifier};
use uv_pep508::{MarkerEnvironment, MarkerTree, MarkerTreeKind};
use uv_pypi_types::{HashDigest, ParsedUrlError, Requirement, VerbatimParsedUrl, Yanked};
use uv_pypi_types::{
ConflictingGroupList, HashDigest, ParsedUrlError, Requirement, VerbatimParsedUrl, Yanked,
};
use crate::graph_ops::marker_reachability;
use crate::pins::FilePins;
@ -101,6 +103,7 @@ impl ResolutionGraph {
index: &InMemoryIndex,
git: &GitResolver,
python: &PythonRequirement,
conflicting_groups: &ConflictingGroupList,
resolution_strategy: &ResolutionStrategy,
options: Options,
) -> Result<Self, ResolveError> {
@ -201,8 +204,8 @@ impl ResolutionGraph {
resolution
.env
.try_markers()
.expect("A non-forking resolution exists in forking mode")
.clone()
.cloned()
.unwrap_or(MarkerTree::TRUE)
})
// Any unsatisfiable forks were skipped.
.filter(|fork| !fork.is_false())
@ -237,27 +240,40 @@ impl ResolutionGraph {
fork_markers,
};
#[allow(unused_mut, reason = "Used in debug_assertions below")]
let mut conflicting = graph.find_conflicting_distributions();
if !conflicting.is_empty() {
tracing::warn!(
"found {} conflicting distributions in resolution, \
// We only do conflicting distribution detection when no
// conflicting groups have been specified. The reason here
// is that when there are conflicting groups, then from the
// perspective of marker expressions only, it may look like
// one can install different versions of the same package for
// the same marker environment. However, the thing preventing
// this is that the only way this should be possible is if
// one tries to install two or more conflicting extras at
// the same time. At which point, uv will report an error,
// thereby sidestepping the possibility of installing different
// versions of the same package into the same virtualenv. ---AG
if conflicting_groups.is_empty() {
#[allow(unused_mut, reason = "Used in debug_assertions below")]
let mut conflicting = graph.find_conflicting_distributions();
if !conflicting.is_empty() {
tracing::warn!(
"found {} conflicting distributions in resolution, \
please report this as a bug at \
https://github.com/astral-sh/uv/issues/new",
conflicting.len()
);
}
// When testing, we materialize any conflicting distributions as an
// error to ensure any relevant tests fail. Otherwise, we just leave
// it at the warning message above. The reason for not returning an
// error "in production" is that an incorrect resolution may only be
// incorrect in certain marker environments, but fine in most others.
// Returning an error in that case would make `uv` unusable whenever
// the bug occurs, but letting it through means `uv` *could* still be
// usable.
#[cfg(debug_assertions)]
if let Some(err) = conflicting.pop() {
return Err(ResolveError::ConflictingDistribution(err));
conflicting.len()
);
}
// When testing, we materialize any conflicting distributions as an
// error to ensure any relevant tests fail. Otherwise, we just leave
// it at the warning message above. The reason for not returning an
// error "in production" is that an incorrect resolution may only be
// incorrect in certain marker environments, but fine in most others.
// Returning an error in that case would make `uv` unusable whenever
// the bug occurs, but letting it through means `uv` *could* still be
// usable.
#[cfg(debug_assertions)]
if let Some(err) = conflicting.pop() {
return Err(ResolveError::ConflictingDistribution(err));
}
}
Ok(graph)
}

View file

@ -1,12 +1,16 @@
use std::sync::Arc;
use rustc_hash::{FxHashMap, FxHashSet};
use uv_normalize::{ExtraName, PackageName};
use uv_pep508::{MarkerEnvironment, MarkerTree};
use uv_pypi_types::ResolverMarkerEnvironment;
use uv_pypi_types::{
ConflictingGroup, ConflictingGroupList, ConflictingGroupRef, ResolverMarkerEnvironment,
};
use crate::pubgrub::{PubGrubDependency, PubGrubPackage};
use crate::requires_python::RequiresPythonRange;
use crate::resolver::ForkState;
use crate::PythonRequirement;
use crate::ResolveError;
/// Represents one or more marker environments for a resolution.
///
@ -57,7 +61,7 @@ use crate::ResolveError;
/// explicitly by the resolver. (Perhaps a future refactor can incorporate
/// `requires-python` into this type as well, but it's not totally clear at
/// time of writing if that's a good idea or not.)
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ResolverEnvironment {
kind: Kind,
}
@ -69,7 +73,7 @@ pub struct ResolverEnvironment {
/// analysis on this type, and instead try to encapsulate the case analysis via
/// higher level routines on `ResolverEnvironment` itself. (This goal may prove
/// intractable, so don't treat it like gospel.)
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Eq, PartialEq)]
enum Kind {
/// We're solving for one specific marker environment only.
///
@ -94,6 +98,8 @@ enum Kind {
initial_forks: Arc<[MarkerTree]>,
/// The markers associated with this resolver fork.
markers: MarkerTree,
/// Conflicting group exclusions.
exclude: Arc<FxHashMap<PackageName, FxHashSet<ExtraName>>>,
},
}
@ -131,6 +137,7 @@ impl ResolverEnvironment {
let kind = Kind::Universal {
initial_forks: initial_forks.into(),
markers: MarkerTree::TRUE,
exclude: Arc::new(FxHashMap::default()),
};
ResolverEnvironment { kind }
}
@ -149,32 +156,44 @@ impl ResolverEnvironment {
/// Returns `false` only when this environment is a fork and it is disjoint
/// with the given marker.
pub(crate) fn included(&self, marker: &MarkerTree) -> bool {
pub(crate) fn included_by_marker(&self, marker: &MarkerTree) -> bool {
match self.kind {
Kind::Specific { .. } => true,
Kind::Universal { ref markers, .. } => !markers.is_disjoint(marker),
}
}
/// Returns true if the dependency represented by this forker may be
/// included in the given resolver environment.
pub(crate) fn included_by_group(&self, group: ConflictingGroupRef<'_>) -> bool {
match self.kind {
Kind::Specific { .. } => true,
Kind::Universal { ref exclude, .. } => !exclude
.get(group.package())
.map(|set| set.contains(group.extra()))
.unwrap_or(false),
}
}
/// Returns the bounding Python versions that can satisfy this
/// resolver environment's marker, if it's constrained.
pub(crate) fn requires_python(&self) -> Option<RequiresPythonRange> {
let marker = self.try_markers().unwrap_or(&MarkerTree::TRUE);
crate::marker::requires_python(marker)
}
/// Narrow this environment given the forking markers.
///
/// This should be used when generating forking states in the resolver. In
/// effect, this "forks" this environment (which itself may be a fork) by
/// intersecting it with the markers given.
/// This effectively intersects any markers in this environment with the
/// markers given, and returns the new resulting environment.
///
/// This may return `None` when the marker intersection results in a marker
/// that can never be true for the given Python requirement. In this case,
/// the corresponding fork should be dropped.
/// This is also useful in tests to generate a "forked" marker environment.
///
/// # Panics
///
/// This panics if the resolver environment corresponds to one and only one
/// specific marker environment. i.e., "pip"-style resolution.
pub(crate) fn narrow_environment(
&self,
python_requirement: &PythonRequirement,
rhs: &MarkerTree,
) -> Option<ResolverEnvironment> {
fn narrow_environment(&self, rhs: MarkerTree) -> ResolverEnvironment {
match self.kind {
Kind::Specific { .. } => {
unreachable!("environment narrowing only happens in universal resolution")
@ -182,25 +201,58 @@ impl ResolverEnvironment {
Kind::Universal {
ref initial_forks,
markers: ref lhs,
ref exclude,
} => {
let mut lhs = lhs.clone();
lhs.and(rhs.clone());
let python_marker = python_requirement.to_marker_tree();
// If the new combined marker is disjoint with the given
// Python requirement, then this fork shouldn't exist.
if lhs.is_disjoint(&python_marker) {
tracing::debug!(
"Skipping split {lhs:?} \
because of Python requirement {python_marker:?}",
);
return None;
let mut markers = lhs.clone();
markers.and(rhs);
let kind = Kind::Universal {
initial_forks: Arc::clone(initial_forks),
markers,
exclude: Arc::clone(exclude),
};
ResolverEnvironment { kind }
}
}
}
/// Returns a new resolver environment with the given groups excluded from
/// it.
///
/// When a group is excluded from a resolver environment,
/// `ResolverEnvironment::included_by_group` will return false. The idea
/// is that a dependency with a corresponding group should be excluded by
/// forks in the resolver with this environment.
///
/// # Panics
///
/// This panics if the resolver environment corresponds to one and only one
/// specific marker environment. i.e., "pip"-style resolution.
pub(crate) fn exclude_by_group(
&self,
groups: impl IntoIterator<Item = ConflictingGroup>,
) -> ResolverEnvironment {
match self.kind {
Kind::Specific { .. } => {
unreachable!("environment narrowing only happens in universal resolution")
}
Kind::Universal {
ref initial_forks,
ref markers,
ref exclude,
} => {
let mut exclude: FxHashMap<_, _> = (**exclude).clone();
for group in groups {
exclude
.entry(group.package().clone())
.or_default()
.insert(group.extra().clone());
}
let kind = Kind::Universal {
initial_forks: initial_forks.clone(),
markers: lhs,
initial_forks: Arc::clone(initial_forks),
markers: markers.clone(),
exclude: Arc::new(exclude),
};
Some(ResolverEnvironment { kind })
ResolverEnvironment { kind }
}
}
}
@ -215,7 +267,9 @@ impl ResolverEnvironment {
/// configuration.
pub(crate) fn initial_forked_states(&self, init: ForkState) -> Vec<ForkState> {
let Kind::Universal {
ref initial_forks, ..
ref initial_forks,
markers: ref _markers,
exclude: ref _exclude,
} = self.kind
else {
return vec![init];
@ -226,7 +280,10 @@ impl ResolverEnvironment {
initial_forks
.iter()
.rev()
.filter_map(|initial_fork| init.clone().with_env(&initial_fork))
.map(|initial_fork| {
init.clone()
.with_env(self.narrow_environment(initial_fork.clone()))
})
.collect()
}
@ -244,7 +301,7 @@ impl ResolverEnvironment {
&self,
python_requirement: &PythonRequirement,
) -> Option<PythonRequirement> {
Some(python_requirement.narrow(&self.requires_python_range()?)?)
python_requirement.narrow(&self.requires_python_range()?)
}
/// Returns a message formatted for end users representing a fork in the
@ -314,6 +371,106 @@ impl std::fmt::Display for ResolverEnvironment {
}
}
/// The different forking possibilities.
///
/// Upon seeing a dependency, when determining whether to fork, three
/// different cases are possible:
///
/// 1. Forking cannot be ruled out.
/// 2. The dependency is excluded by the "parent" fork.
/// 3. The dependency is unconditional and thus cannot provoke new forks.
///
/// This enum encapsulates those possibilities. In the first case, a helper is
/// returned to help manage the nuts and bolts of forking.
#[derive(Debug)]
pub(crate) enum ForkingPossibility<'d> {
    /// Forking cannot be ruled out; the wrapped `Forker` drives the actual
    /// split.
    Possible(Forker<'d>),
    /// The dependency's marker can never be satisfied within the parent
    /// fork's environment, so the dependency can be dropped entirely.
    DependencyAlwaysExcluded,
    /// The dependency's marker is always true, so it applies unconditionally
    /// and can never provoke a fork.
    NoForkingPossible,
}
impl<'d> ForkingPossibility<'d> {
    /// Classify the given dependency with respect to the given resolver
    /// environment.
    ///
    /// A dependency without a marker is treated as unconditional
    /// (marker = TRUE).
    pub(crate) fn new(
        env: &ResolverEnvironment,
        dep: &'d PubGrubDependency,
    ) -> ForkingPossibility<'d> {
        let marker = dep.package.marker().unwrap_or(&MarkerTree::TRUE);
        // The marker can never hold in this environment: the dependency is
        // simply excluded here.
        if !env.included_by_marker(marker) {
            return ForkingPossibility::DependencyAlwaysExcluded;
        }
        // An always-true marker applies everywhere and cannot split the
        // environment.
        if marker.is_true() {
            return ForkingPossibility::NoForkingPossible;
        }
        ForkingPossibility::Possible(Forker {
            package: &dep.package,
            marker: marker.clone(),
        })
    }
}
/// An encapsulation of forking based on a single dependency.
#[derive(Debug)]
pub(crate) struct Forker<'d> {
    /// The package (dependency) that may provoke a fork.
    package: &'d PubGrubPackage,
    /// The marker expression governing when this dependency applies.
    marker: MarkerTree,
}
impl<'d> Forker<'d> {
    /// Attempt a fork based on the given resolver environment.
    ///
    /// If a fork is possible, then a new forker and at least one new
    /// resolver environment is returned. In some cases, it is possible for
    /// more resolver environments to be returned. (For example, when the
    /// negation of this forker's markers has overlap with the given resolver
    /// environment.)
    ///
    /// Returns `None` when this forker's marker can never be satisfied in
    /// the given environment, in which case no fork is needed.
    pub(crate) fn fork(
        &self,
        env: &ResolverEnvironment,
        _conflicting_groups: &ConflictingGroupList,
    ) -> Option<(Forker<'d>, Vec<ResolverEnvironment>)> {
        if !env.included_by_marker(&self.marker) {
            return None;
        }
        let Kind::Universal {
            markers: ref env_marker,
            ..
        } = env.kind
        else {
            panic!("resolver must be in universal mode for forking")
        };
        let mut envs = vec![];
        {
            // The fork in which this dependency is *excluded*: narrow by the
            // negation of the marker, but only when that negation can still
            // be satisfied in the current environment.
            let not_marker = self.marker.negate();
            if !env_marker.is_disjoint(&not_marker) {
                envs.push(env.narrow_environment(not_marker));
            }
        }
        // The fork in which this dependency is *included*.
        //
        // Note also that we push this one last for historical reasons.
        // Changing the order of forks can change the output in some
        // ways. While it's probably fine, we try to avoid changing the
        // output.
        envs.push(env.narrow_environment(self.marker.clone()));
        // Whatever part of this forker's marker lies outside the current
        // environment remains available for forking elsewhere.
        let mut remaining_marker = self.marker.clone();
        remaining_marker.and(env_marker.negate());
        let remaining_forker = Forker {
            package: self.package,
            marker: remaining_marker,
        };
        Some((remaining_forker, envs))
    }

    /// Returns true if the dependency represented by this forker may be
    /// included in the given resolver environment.
    pub(crate) fn included(&self, env: &ResolverEnvironment) -> bool {
        let marker = self.package.marker().unwrap_or(&MarkerTree::TRUE);
        env.included_by_marker(marker)
    }
}
#[cfg(test)]
mod tests {
use std::ops::Bound;
@ -352,8 +509,7 @@ mod tests {
fn requires_python_range_lower(lower_version_bound: &str) -> RequiresPythonRange {
let lower = LowerBound::new(Bound::Included(version(lower_version_bound)));
let range = RequiresPythonRange::new(lower, UpperBound::default());
range
RequiresPythonRange::new(lower, UpperBound::default())
}
fn marker(marker: &str) -> MarkerTree {
@ -413,8 +569,7 @@ mod tests {
fn narrow_python_requirement_forking_no_op() {
let pyreq = python_requirement("3.10");
let resolver_env = ResolverEnvironment::universal(vec![])
.narrow_environment(&pyreq, &marker("python_version >= '3.10'"))
.unwrap();
.narrow_environment(marker("python_version >= '3.10'"));
assert_eq!(
resolver_env.narrow_python_requirement(&pyreq),
Some(python_requirement("3.10")),
@ -428,8 +583,7 @@ mod tests {
fn narrow_python_requirement_forking_stricter() {
let pyreq = python_requirement("3.10");
let resolver_env = ResolverEnvironment::universal(vec![])
.narrow_environment(&pyreq, &marker("python_version >= '3.11'"))
.unwrap();
.narrow_environment(marker("python_version >= '3.11'"));
let expected = {
let range = requires_python_range_lower("3.11");
let requires_python = requires_python_lower("3.10").narrow(&range).unwrap();
@ -448,8 +602,7 @@ mod tests {
fn narrow_python_requirement_forking_relaxed() {
let pyreq = python_requirement("3.11");
let resolver_env = ResolverEnvironment::universal(vec![])
.narrow_environment(&pyreq, &marker("python_version >= '3.10'"))
.unwrap();
.narrow_environment(marker("python_version >= '3.10'"));
assert_eq!(
resolver_env.narrow_python_requirement(&pyreq),
Some(python_requirement("3.11")),

View file

@ -60,7 +60,7 @@ impl<T> ForkMap<T> {
};
values
.iter()
.filter(|entry| env.included(&entry.marker))
.filter(|entry| env.included_by_marker(&entry.marker))
.map(|entry| &entry.value)
.collect()
}

View file

@ -1,7 +1,5 @@
//! Given a set of requirements, find a set of compatible packages.
#![allow(warnings)]
use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
@ -15,13 +13,14 @@ use dashmap::DashMap;
use either::Either;
use futures::{FutureExt, StreamExt};
use itertools::Itertools;
use pubgrub::{Incompatibility, Range, Ranges, State};
use pubgrub::{Incompatibility, Range, State};
use rustc_hash::{FxHashMap, FxHashSet};
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::oneshot;
use tokio_stream::wrappers::ReceiverStream;
use tracing::{debug, info, instrument, trace, warn, Level};
use environment::ForkingPossibility;
pub use environment::ResolverEnvironment;
pub(crate) use fork_map::{ForkMap, ForkSet};
pub(crate) use urls::Urls;
@ -38,7 +37,10 @@ use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::{release_specifiers_to_ranges, Version, MIN_VERSION};
use uv_pep508::MarkerTree;
use uv_platform_tags::Tags;
use uv_pypi_types::{Requirement, ResolutionMetadata, VerbatimParsedUrl};
use uv_pypi_types::{
ConflictingGroup, ConflictingGroupList, ConflictingGroupRef, Requirement, ResolutionMetadata,
VerbatimParsedUrl,
};
use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
use uv_warnings::warn_user_once;
@ -108,6 +110,7 @@ struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
hasher: HashStrategy,
env: ResolverEnvironment,
python_requirement: PythonRequirement,
conflicting_groups: ConflictingGroupList,
workspace_members: BTreeSet<PackageName>,
selector: CandidateSelector,
index: InMemoryIndex,
@ -148,6 +151,7 @@ impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
options: Options,
python_requirement: &'a PythonRequirement,
env: ResolverEnvironment,
conflicting_groups: ConflictingGroupList,
tags: Option<&'a Tags>,
flat_index: &'a FlatIndex,
index: &'a InMemoryIndex,
@ -174,6 +178,7 @@ impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
hasher,
env,
python_requirement,
conflicting_groups,
index,
build_context.git(),
build_context.capabilities(),
@ -194,6 +199,7 @@ impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
hasher: &HashStrategy,
env: ResolverEnvironment,
python_requirement: &PythonRequirement,
conflicting_groups: ConflictingGroupList,
index: &InMemoryIndex,
git: &GitResolver,
capabilities: &IndexCapabilities,
@ -221,6 +227,7 @@ impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
locations: locations.clone(),
env,
python_requirement: python_requirement.clone(),
conflicting_groups,
installed_packages,
unavailable_packages: DashMap::default(),
incomplete_packages: DashMap::default(),
@ -601,6 +608,7 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
&self.index,
&self.git,
&self.python_requirement,
&self.conflicting_groups,
self.selector.resolution_strategy(),
self.options,
)
@ -683,15 +691,15 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
forks
.into_iter()
.enumerate()
.filter_map(move |(i, fork)| {
.map(move |(i, fork)| {
let is_last = i == forks_len - 1;
let forked_state = cur_state.take().unwrap();
if !is_last {
cur_state = Some(forked_state.clone());
}
let markers = fork.markers.clone();
Some((fork, forked_state.with_env(&markers)?))
let env = fork.env.clone();
(fork, forked_state.with_env(env))
})
.map(move |(fork, mut forked_state)| {
forked_state.add_package_version_dependencies(
@ -721,17 +729,6 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
}
Ok(forked_state)
})
// Drop any forked states whose markers are known to never
// match any marker environments.
.filter(|result| {
if let Ok(ref forked_state) = result {
let markers = forked_state.env.try_markers().expect("is a fork");
if markers.is_false() {
return false;
}
}
true
})
}
/// Visit a [`PubGrubPackage`] prior to selection. This should be called on a [`PubGrubPackage`]
@ -1185,7 +1182,7 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
Dependencies::Unavailable(err) => ForkedDependencies::Unavailable(err),
})
} else {
Ok(result?.fork(python_requirement))
Ok(result?.fork(env, python_requirement, &self.conflicting_groups))
}
}
@ -1336,6 +1333,18 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
}
};
if let Some(err) =
find_conflicting_extra(&self.conflicting_groups, &metadata.requires_dist)
{
return Err(err);
}
for dependencies in metadata.dependency_groups.values() {
if let Some(err) =
find_conflicting_extra(&self.conflicting_groups, dependencies)
{
return Err(err);
}
}
let requirements = self.flatten_requirements(
&metadata.requires_dist,
&metadata.dependency_groups,
@ -1548,7 +1557,7 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
// If we're in a fork in universal mode, ignore any dependency that isn't part of
// this fork (but will be part of another fork).
if !env.included(&requirement.marker) {
if !env.included_by_marker(&requirement.marker) {
trace!("skipping {requirement} because of {env}");
return None;
}
@ -1566,6 +1575,11 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
) {
return None;
}
if !env.included_by_group(
ConflictingGroupRef::from((&requirement.name, source_extra)),
) {
return None;
}
}
None => {
if !requirement.evaluate_markers(env.marker_environment(), &[]) {
@ -1640,7 +1654,7 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
// If we're in a fork in universal mode, ignore any dependency that isn't part of
// this fork (but will be part of another fork).
if !env.included(&constraint.marker) {
if !env.included_by_marker(&constraint.marker) {
trace!("skipping {constraint} because of {env}");
return None;
}
@ -1654,6 +1668,11 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
) {
return None;
}
if !env.included_by_group(
ConflictingGroupRef::from((&requirement.name, source_extra)),
) {
return None;
}
}
None => {
if !constraint.evaluate_markers(env.marker_environment(), &[]) {
@ -2071,7 +2090,7 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
/// State that is used during unit propagation in the resolver, one instance per fork.
#[derive(Clone)]
struct ForkState {
pub(crate) struct ForkState {
/// The internal state used by the resolver.
///
/// Note that not all parts of this state are strictly internal. For
@ -2283,16 +2302,14 @@ impl ForkState {
///
/// If the fork should be dropped (e.g., because its markers can never be true for its
/// Python requirement), then this returns `None`.
fn with_env(mut self, markers: &MarkerTree) -> Option<Self> {
self.env = self
.env
.narrow_environment(&self.python_requirement, markers)?;
fn with_env(mut self, env: ResolverEnvironment) -> Self {
self.env = env;
// If the fork contains a narrowed Python requirement, apply it.
if let Some(req) = self.env.narrow_python_requirement(&self.python_requirement) {
debug!("Narrowed `requires-python` bound to: {}", req.target());
self.python_requirement = req;
}
Some(self)
self
}
fn into_resolution(self) -> Resolution {
@ -2664,7 +2681,12 @@ impl Dependencies {
/// A fork *only* occurs when there are multiple dependencies with the same
/// name *and* those dependency specifications have corresponding marker
/// expressions that are completely disjoint with one another.
fn fork(self, python_requirement: &PythonRequirement) -> ForkedDependencies {
fn fork(
self,
env: &ResolverEnvironment,
python_requirement: &PythonRequirement,
conflicting_groups: &ConflictingGroupList,
) -> ForkedDependencies {
let deps = match self {
Dependencies::Available(deps) => deps,
Dependencies::Unforkable(deps) => return ForkedDependencies::Unforked(deps),
@ -2682,7 +2704,7 @@ impl Dependencies {
let Forks {
mut forks,
diverging_packages,
} = Forks::new(name_to_deps, python_requirement);
} = Forks::new(name_to_deps, env, python_requirement, conflicting_groups);
if forks.is_empty() {
ForkedDependencies::Unforked(vec![])
} else if forks.len() == 1 {
@ -2742,14 +2764,13 @@ struct Forks {
impl Forks {
fn new(
name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>>,
env: &ResolverEnvironment,
python_requirement: &PythonRequirement,
conflicting_groups: &ConflictingGroupList,
) -> Forks {
let python_marker = python_requirement.to_marker_tree();
let mut forks = vec![Fork {
dependencies: vec![],
markers: MarkerTree::TRUE,
}];
let mut forks = vec![Fork::new(env.clone())];
let mut diverging_packages = BTreeSet::new();
for (name, mut deps) in name_to_deps {
assert!(!deps.is_empty(), "every name has at least one dependency");
@ -2780,64 +2801,118 @@ impl Forks {
let dep = deps.pop().unwrap();
let markers = dep.package.marker().cloned().unwrap_or(MarkerTree::TRUE);
for fork in &mut forks {
if !fork.markers.is_disjoint(&markers) {
fork.dependencies.push(dep.clone());
if fork.env.included_by_marker(&markers) {
fork.add_dependency(dep.clone());
}
}
continue;
}
}
for dep in deps {
let mut markers = dep.package.marker().cloned().unwrap_or(MarkerTree::TRUE);
if markers.is_false() {
// If the markers can never be satisfied, then we
// can drop this dependency unceremoniously.
continue;
}
if markers.is_true() {
// Or, if the markers are always true, then we just
// add the dependency to every fork unconditionally.
for fork in &mut forks {
if !fork.markers.is_disjoint(&markers) {
fork.dependencies.push(dep.clone());
}
let mut forker = match ForkingPossibility::new(env, &dep) {
ForkingPossibility::Possible(forker) => forker,
ForkingPossibility::DependencyAlwaysExcluded => {
// If the markers can never be satisfied by the parent
// fork, then we can drop this dependency unceremoniously.
continue;
}
continue;
}
ForkingPossibility::NoForkingPossible => {
// Or, if the markers are always true, then we just
// add the dependency to every fork unconditionally.
for fork in &mut forks {
fork.add_dependency(dep.clone());
}
continue;
}
};
// Otherwise, we *should* need to add a new fork...
diverging_packages.insert(name.clone());
let mut new = vec![];
for mut fork in std::mem::take(&mut forks) {
if fork.markers.is_disjoint(&markers) {
for fork in std::mem::take(&mut forks) {
let Some((remaining_forker, envs)) = forker.fork(&fork.env, conflicting_groups)
else {
new.push(fork);
continue;
}
};
forker = remaining_forker;
let not_markers = markers.negate();
let mut new_markers = markers.clone();
new_markers.and(fork.markers.negate());
if !fork.markers.is_disjoint(&not_markers) {
for fork_env in envs {
let mut new_fork = fork.clone();
new_fork.intersect(not_markers);
new_fork.set_env(fork_env);
// We only add the dependency to this fork if it
// satisfies the fork's markers. Some forks are
// specifically created to exclude this dependency,
// so this isn't always true!
if forker.included(&new_fork.env) {
new_fork.add_dependency(dep.clone());
}
// Filter out any forks we created that are disjoint with our
// Python requirement.
if !new_fork.markers.is_disjoint(&python_marker) {
if new_fork.env.included_by_marker(&python_marker) {
new.push(new_fork);
}
}
fork.dependencies.push(dep.clone());
fork.intersect(markers);
// Filter out any forks we created that are disjoint with our
// Python requirement.
if !fork.markers.is_disjoint(&python_marker) {
new.push(fork);
}
markers = new_markers;
}
forks = new;
}
}
// When there is a conflicting group configuration, we need
// to potentially add more forks. Each fork added contains an
// exclusion list of conflicting groups where dependencies with
// the corresponding package and extra name are forcefully
// excluded from that group.
//
// We specifically iterate on conflicting groups and
// potentially re-generate all forks for each one. We do it
// this way in case there are multiple sets of conflicting
// groups that impact the forks here.
//
// For example, if we have conflicting groups {x1, x2} and {x3,
// x4}, we need to make sure the forks generated from one set
// also account for the other set.
for groups in conflicting_groups.iter() {
let mut new = vec![];
for fork in std::mem::take(&mut forks) {
let mut has_conflicting_dependency = false;
for group in groups.iter() {
if fork.contains_conflicting_group(group.as_ref()) {
has_conflicting_dependency = true;
break;
}
}
if !has_conflicting_dependency {
new.push(fork);
continue;
}
// Create a fork that excludes ALL extras.
let mut fork_none = fork.clone();
for group in groups.iter() {
fork_none = fork_none.exclude([group.clone()]);
}
new.push(fork_none);
// Now create a fork for each conflicting group, where
// that fork excludes every *other* conflicting group.
//
// So if we have conflicting extras foo, bar and baz,
// then this creates three forks: one that excludes
// {foo, bar}, one that excludes {foo, baz} and one
// that excludes {bar, baz}.
for (i, _) in groups.iter().enumerate() {
let fork_allows_group = fork.clone().exclude(
groups
.iter()
.enumerate()
.filter(|&(j, _)| i != j)
.map(|(_, group)| group.clone()),
);
new.push(fork_allows_group);
}
}
forks = new;
}
Forks {
forks,
diverging_packages,
@ -2854,7 +2929,7 @@ impl Forks {
/// have the same name and because the marker expressions are disjoint,
/// a fork occurs. One fork will contain `a<2` but not `a>=2`, while
/// the other fork will contain `a>=2` but not `a<2`.
#[derive(Clone, Debug, Eq, PartialEq)]
#[derive(Clone, Debug)]
struct Fork {
/// The list of dependencies for this fork, guaranteed to be conflict
/// free. (i.e., There are no two packages with the same name with
@ -2865,26 +2940,108 @@ struct Fork {
/// it should be impossible for a package with a marker expression that is
/// disjoint from the marker expression on this fork to be added.
dependencies: Vec<PubGrubDependency>,
/// The markers that provoked this fork.
/// The conflicting groups in this fork.
///
/// So in the example above, the `a<2` fork would have
/// `sys_platform == 'foo'`, while the `a>=2` fork would have
/// `sys_platform == 'bar'`.
/// This exists to make some access patterns more efficient. Namely,
/// it makes it easy to check whether there's a dependency with a
/// particular conflicting group in this fork.
conflicting_groups: FxHashMap<PackageName, FxHashSet<ExtraName>>,
/// The resolver environment for this fork.
///
/// (This doesn't include any marker expressions from a parent fork.)
markers: MarkerTree,
/// Principally, this corresponds to the markers in this for. So in the
/// example above, the `a<2` fork would have `sys_platform == 'foo'`, while
/// the `a>=2` fork would have `sys_platform == 'bar'`.
///
/// If this fork was generated from another fork, then this *includes*
/// the criteria from its parent. i.e., Its marker expression represents
/// the intersection of the marker expression from its parent and any
/// additional marker expression generated by addition forking based on
/// conflicting dependency specifications.
env: ResolverEnvironment,
}
impl Fork {
fn intersect(&mut self, markers: MarkerTree) {
self.markers.and(markers);
/// Create a new fork with no dependencies with the given resolver
/// environment.
fn new(env: ResolverEnvironment) -> Fork {
Fork {
dependencies: vec![],
conflicting_groups: FxHashMap::default(),
env,
}
}
/// Add a dependency to this fork.
fn add_dependency(&mut self, dep: PubGrubDependency) {
if let Some(conflicting_group) = dep.package.conflicting_group() {
self.conflicting_groups
.entry(conflicting_group.package().clone())
.or_default()
.insert(conflicting_group.extra().clone());
}
self.dependencies.push(dep);
}
/// Sets the resolver environment to the one given.
///
/// Any dependency in this fork that does not satisfy the given environment
/// is removed.
fn set_env(&mut self, env: ResolverEnvironment) {
self.env = env;
self.dependencies.retain(|dep| {
let Some(markers) = dep.package.marker() else {
return true;
};
!self.markers.is_disjoint(markers)
if self.env.included_by_marker(markers) {
return true;
}
if let Some(conflicting_group) = dep.package.conflicting_group() {
if let Some(set) = self.conflicting_groups.get_mut(conflicting_group.package()) {
set.remove(conflicting_group.extra());
}
}
false
});
}
/// Returns true if any of the dependencies in this fork contain a
/// dependency with the given package and extra values.
fn contains_conflicting_group(&self, group: ConflictingGroupRef<'_>) -> bool {
self.conflicting_groups
.get(group.package())
.map(|set| set.contains(group.extra()))
.unwrap_or(false)
}
    /// Exclude the given groups from this fork.
    ///
    /// This removes all dependencies matching the given conflicting groups.
    fn exclude(mut self, groups: impl IntoIterator<Item = ConflictingGroup>) -> Fork {
        // Record the exclusions on the environment first so that
        // `included_by_group` below observes them.
        self.env = self.env.exclude_by_group(groups);
        self.dependencies.retain(|dep| {
            // Dependencies without a conflicting group are never excluded.
            let Some(conflicting_group) = dep.package.conflicting_group() else {
                return true;
            };
            if self.env.included_by_group(conflicting_group) {
                return true;
            }
            // NOTE(review): the conflicting group is re-fetched here,
            // presumably because the first value was consumed by
            // `included_by_group` above — confirm whether the ref type is
            // `Copy` and the second lookup can be elided.
            if let Some(conflicting_group) = dep.package.conflicting_group() {
                // Keep the fork's conflicting-group index in sync with the
                // dependency list as entries are dropped.
                if let Some(set) = self.conflicting_groups.get_mut(conflicting_group.package()) {
                    set.remove(conflicting_group.extra());
                }
            }
            false
        });
        self
    }
}
impl Eq for Fork {}

impl PartialEq for Fork {
    // Equality is defined by the dependency list and the resolver
    // environment. `conflicting_groups` is omitted: it is an index built
    // from `dependencies` (see `add_dependency`), so comparing it would be
    // redundant.
    fn eq(&self, other: &Fork) -> bool {
        self.dependencies == other.dependencies && self.env == other.env
    }
}
impl Ord for Fork {
@ -2892,8 +3049,8 @@ impl Ord for Fork {
// A higher `requires-python` requirement indicates a _lower-priority_ fork. We'd prefer
// to solve `<3.7` before solving `>=3.7`, since the resolution produced by the former might
// work for the latter, but the inverse is unlikely to be true.
let self_bound = marker::requires_python(&self.markers).unwrap_or_default();
let other_bound = marker::requires_python(&other.markers).unwrap_or_default();
let self_bound = self.env.requires_python().unwrap_or_default();
let other_bound = other.env.requires_python().unwrap_or_default();
other_bound.lower().cmp(self_bound.lower()).then_with(|| {
// If there's no difference, prioritize forks with upper bounds. We'd prefer to solve
@ -2930,3 +3087,36 @@ impl PartialOrd for Fork {
Some(self.cmp(other))
}
}
/// Returns an error if a conflicting extra is found in the given requirements.
///
/// Specifically, if there is any conflicting extra (just one is enough) that
/// is unconditionally enabled as part of a dependency specification, then this
/// returns an error.
///
/// The reason why we're so conservative here is because it avoids us needing
/// to look at the entire dependency tree at once.
///
/// For example, consider packages `root`, `a`, `b` and `c`, where `c` has
/// declared conflicting extras of `x1` and `x2`.
///
/// Now imagine `root` depends on `a` and `b`, `a` depends on `c[x1]` and `b`
/// depends on `c[x2]`. That's a conflict, but not easily detectable unless
/// you reject either `c[x1]` or `c[x2]` on the grounds that `x1` and `x2` are
/// conflicting and thus cannot be enabled unconditionally.
fn find_conflicting_extra(
    conflicting: &ConflictingGroupList,
    reqs: &[Requirement],
) -> Option<ResolveError> {
    // Scan every (requirement, extra) pair and report the first one that
    // matches a declared conflicting group.
    reqs.iter()
        .flat_map(|req| req.extras.iter().map(move |extra| (req, extra)))
        .find(|(req, extra)| conflicting.contains(&req.name, extra))
        .map(|(req, extra)| ResolveError::ConflictingExtra {
            requirement: Box::new(req.clone()),
            extra: extra.clone(),
        })
}

View file

@ -8,7 +8,7 @@ use uv_configuration::{
};
use uv_distribution_types::{Index, IndexUrl, PipExtraIndex, PipFindLinks, PipIndex};
use uv_install_wheel::linker::LinkMode;
use uv_pypi_types::SupportedEnvironments;
use uv_pypi_types::{SchemaConflictingGroupList, SupportedEnvironments};
use uv_python::{PythonDownloads, PythonPreference, PythonVersion};
use uv_resolver::{AnnotationStyle, ExcludeNewer, PrereleaseMode, ResolutionMode};
@ -90,6 +90,7 @@ impl_combine_or!(PythonVersion);
impl_combine_or!(ResolutionMode);
impl_combine_or!(String);
impl_combine_or!(SupportedEnvironments);
impl_combine_or!(SchemaConflictingGroupList);
impl_combine_or!(TargetTriple);
impl_combine_or!(TrustedPublishing);
impl_combine_or!(Url);

View file

@ -100,6 +100,9 @@ pub struct Options {
// NOTE(charlie): These fields should be kept in-sync with `ToolUv` in
// `crates/uv-workspace/src/pyproject.rs`. The documentation lives on that struct.
// They're only respected in `pyproject.toml` files, and should be rejected in `uv.toml` files.
#[cfg_attr(feature = "schemars", schemars(skip))]
pub conflicting_groups: Option<serde::de::IgnoredAny>,
#[cfg_attr(feature = "schemars", schemars(skip))]
pub workspace: Option<serde::de::IgnoredAny>,
@ -1559,6 +1562,7 @@ pub struct OptionsWire {
// NOTE(charlie): These fields should be kept in-sync with `ToolUv` in
// `crates/uv-workspace/src/pyproject.rs`. The documentation lives on that struct.
// They're only respected in `pyproject.toml` files, and should be rejected in `uv.toml` files.
conflicting_groups: Option<serde::de::IgnoredAny>,
workspace: Option<serde::de::IgnoredAny>,
sources: Option<serde::de::IgnoredAny>,
managed: Option<serde::de::IgnoredAny>,
@ -1611,6 +1615,7 @@ impl From<OptionsWire> for Options {
override_dependencies,
constraint_dependencies,
environments,
conflicting_groups,
publish_url,
trusted_publishing,
workspace,
@ -1668,6 +1673,7 @@ impl From<OptionsWire> for Options {
override_dependencies,
constraint_dependencies,
environments,
conflicting_groups,
publish: PublishOptions {
publish_url,
trusted_publishing,

View file

@ -24,7 +24,10 @@ use uv_macros::OptionsMetadata;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::{Version, VersionSpecifiers};
use uv_pep508::MarkerTree;
use uv_pypi_types::{RequirementSource, SupportedEnvironments, VerbatimParsedUrl};
use uv_pypi_types::{
ConflictingGroupList, RequirementSource, SchemaConflictingGroupList, SupportedEnvironments,
VerbatimParsedUrl,
};
#[derive(Error, Debug)]
pub enum PyprojectTomlError {
@ -98,6 +101,24 @@ impl PyProjectToml {
false
}
}
/// Returns the set of conflicts for the project.
pub fn conflicting_groups(&self) -> ConflictingGroupList {
let empty = ConflictingGroupList::empty();
let Some(project) = self.project.as_ref() else {
return empty;
};
let Some(tool) = self.tool.as_ref() else {
return empty;
};
let Some(tooluv) = tool.uv.as_ref() else {
return empty;
};
let Some(conflicting) = tooluv.conflicting_groups.as_ref() else {
return empty;
};
conflicting.to_conflicting_with_package_name(&project.name)
}
}
// Ignore raw document in comparison.
@ -439,6 +460,51 @@ pub struct ToolUv {
"#
)]
pub environments: Option<SupportedEnvironments>,
/// Conflicting extras may be declared here.
///
/// It's useful to declare conflicting extras when the extras have mutually
/// incompatible dependencies. For example, extra `foo` might depend on
/// `numpy==2.0.0` while extra `bar` might depend on `numpy==2.1.0`. These
/// extras cannot be activated at the same time. This usually isn't a
/// problem for pip-style workflows, but when using uv project support
/// with universal resolution, it will try to produce a resolution that
/// satisfies both extras simultaneously.
///
/// When this happens, resolution will fail, because one cannot install
/// both `numpy 2.0.0` and `numpy 2.1.0` into the same environment.
///
/// To work around this, you may specify `foo` and `bar` as conflicting
/// extras. When doing universal resolution in project mode, these extras
/// will get their own "forks" distinct from one another in order to permit
/// conflicting dependencies. In exchange, if one tries to install from the
/// lock file with both conflicting extras activated, installation will
/// fail.
#[cfg_attr(
feature = "schemars",
// Skipped for now while we iterate on this feature.
schemars(skip, description = "A list sets of conflicting groups or extras.")
)]
/*
This is commented out temporarily while we finalize its
functionality and naming. This avoids it showing up in docs.
#[option(
default = r#"[]"#,
value_type = "list[list[dict]]",
example = r#"
# Require that `package[test1]` and `package[test2]`
# requirements are resolved in different forks so that they
# cannot conflict with one another.
conflicting-groups = [
[
{ extra = "test1" },
{ extra = "test2" },
]
]
"#
)]
*/
pub conflicting_groups: Option<SchemaConflictingGroupList>,
}
#[derive(Default, Debug, Clone, PartialEq, Eq)]

View file

@ -11,7 +11,7 @@ use uv_distribution_types::Index;
use uv_fs::{Simplified, CWD};
use uv_normalize::{GroupName, PackageName, DEV_DEPENDENCIES};
use uv_pep508::{MarkerTree, RequirementOrigin, VerbatimUrl};
use uv_pypi_types::{Requirement, RequirementSource, SupportedEnvironments};
use uv_pypi_types::{ConflictingGroupList, Requirement, RequirementSource, SupportedEnvironments};
use uv_static::EnvVars;
use uv_warnings::{warn_user, warn_user_once};
@ -392,6 +392,15 @@ impl Workspace {
.and_then(|uv| uv.environments.as_ref())
}
/// Returns the set of conflicts for the workspace.
pub fn conflicting_groups(&self) -> ConflictingGroupList {
let mut conflicting = ConflictingGroupList::empty();
for member in self.packages.values() {
conflicting.append(&mut member.pyproject_toml.conflicting_groups());
}
conflicting
}
/// Returns the set of constraints for the workspace.
pub fn constraints(&self) -> Vec<Requirement> {
let Some(constraints) = self

View file

@ -241,7 +241,8 @@ async fn albatross_root_workspace() {
"dev-dependencies": null,
"override-dependencies": null,
"constraint-dependencies": null,
"environments": null
"environments": null,
"conflicting-groups": null
}
},
"dependency-groups": null
@ -332,7 +333,8 @@ async fn albatross_virtual_workspace() {
"dev-dependencies": null,
"override-dependencies": null,
"constraint-dependencies": null,
"environments": null
"environments": null,
"conflicting-groups": null
}
},
"dependency-groups": null
@ -537,7 +539,8 @@ async fn exclude_package() -> Result<()> {
"dev-dependencies": null,
"override-dependencies": null,
"constraint-dependencies": null,
"environments": null
"environments": null,
"conflicting-groups": null
}
},
"dependency-groups": null
@ -640,7 +643,8 @@ async fn exclude_package() -> Result<()> {
"dev-dependencies": null,
"override-dependencies": null,
"constraint-dependencies": null,
"environments": null
"environments": null,
"conflicting-groups": null
}
},
"dependency-groups": null
@ -756,7 +760,8 @@ async fn exclude_package() -> Result<()> {
"dev-dependencies": null,
"override-dependencies": null,
"constraint-dependencies": null,
"environments": null
"environments": null,
"conflicting-groups": null
}
},
"dependency-groups": null
@ -846,7 +851,8 @@ async fn exclude_package() -> Result<()> {
"dev-dependencies": null,
"override-dependencies": null,
"constraint-dependencies": null,
"environments": null
"environments": null,
"conflicting-groups": null
}
},
"dependency-groups": null

View file

@ -23,7 +23,7 @@ use uv_fs::Simplified;
use uv_git::GitResolver;
use uv_install_wheel::linker::LinkMode;
use uv_normalize::PackageName;
use uv_pypi_types::{Requirement, SupportedEnvironments};
use uv_pypi_types::{ConflictingGroupList, Requirement, SupportedEnvironments};
use uv_python::{
EnvironmentPreference, PythonEnvironment, PythonInstallation, PythonPreference, PythonRequest,
PythonVersion, VersionRequest,
@ -54,6 +54,7 @@ pub(crate) async fn pip_compile(
constraints_from_workspace: Vec<Requirement>,
overrides_from_workspace: Vec<Requirement>,
environments: SupportedEnvironments,
conflicting_groups: ConflictingGroupList,
extras: ExtrasSpecification,
output_file: Option<&Path>,
resolution_mode: ResolutionMode,
@ -251,15 +252,20 @@ pub(crate) async fn pip_compile(
};
// Determine the environment for the resolution.
let (tags, resolver_env) = if universal {
let (tags, resolver_env, conflicting_groups) = if universal {
(
None,
ResolverEnvironment::universal(environments.into_markers()),
conflicting_groups,
)
} else {
let (tags, marker_env) =
resolution_environment(python_version, python_platform, &interpreter)?;
(Some(tags), ResolverEnvironment::specific(marker_env))
(
Some(tags),
ResolverEnvironment::specific(marker_env),
ConflictingGroupList::empty(),
)
};
// Generate, but don't enforce hashes for the requirements.
@ -394,6 +400,7 @@ pub(crate) async fn pip_compile(
tags.as_deref(),
resolver_env.clone(),
python_requirement,
conflicting_groups,
&client,
&flat_index,
&top_level_index,

View file

@ -20,7 +20,7 @@ use uv_fs::Simplified;
use uv_install_wheel::linker::LinkMode;
use uv_installer::{SatisfiesResult, SitePackages};
use uv_pep508::PackageName;
use uv_pypi_types::Requirement;
use uv_pypi_types::{ConflictingGroupList, Requirement};
use uv_python::{
EnvironmentPreference, Prefix, PythonEnvironment, PythonRequest, PythonVersion, Target,
};
@ -400,6 +400,7 @@ pub(crate) async fn pip_install(
Some(&tags),
ResolverEnvironment::specific(marker_env.clone()),
python_requirement,
ConflictingGroupList::empty(),
&client,
&flat_index,
&state.index,

View file

@ -29,7 +29,7 @@ use uv_install_wheel::linker::LinkMode;
use uv_installer::{Plan, Planner, Preparer, SitePackages};
use uv_normalize::{GroupName, PackageName};
use uv_platform_tags::Tags;
use uv_pypi_types::ResolverMarkerEnvironment;
use uv_pypi_types::{ConflictingGroupList, ResolverMarkerEnvironment};
use uv_python::PythonEnvironment;
use uv_requirements::{
LookaheadResolver, NamedRequirementsResolver, RequirementsSource, RequirementsSpecification,
@ -104,6 +104,7 @@ pub(crate) async fn resolve<InstalledPackages: InstalledPackagesProvider>(
tags: Option<&Tags>,
resolver_env: ResolverEnvironment,
python_requirement: PythonRequirement,
conflicting_groups: ConflictingGroupList,
client: &RegistryClient,
flat_index: &FlatIndex,
index: &InMemoryIndex,
@ -290,6 +291,7 @@ pub(crate) async fn resolve<InstalledPackages: InstalledPackagesProvider>(
options,
&python_requirement,
resolver_env,
conflicting_groups,
tags,
flat_index,
index,

View file

@ -17,6 +17,7 @@ use uv_fs::Simplified;
use uv_install_wheel::linker::LinkMode;
use uv_installer::SitePackages;
use uv_pep508::PackageName;
use uv_pypi_types::ConflictingGroupList;
use uv_python::{
EnvironmentPreference, Prefix, PythonEnvironment, PythonRequest, PythonVersion, Target,
};
@ -344,6 +345,7 @@ pub(crate) async fn pip_sync(
Some(&tags),
ResolverEnvironment::specific(marker_env.clone()),
python_requirement,
ConflictingGroupList::empty(),
&client,
&flat_index,
&state.index,

View file

@ -628,6 +628,7 @@ async fn do_lock(
None,
resolver_env,
python_requirement,
workspace.conflicting_groups(),
&client,
&flat_index,
&state.index,
@ -657,6 +658,7 @@ async fn do_lock(
let previous = existing_lock.map(ValidatedLock::into_lock);
let lock = Lock::from_resolution_graph(&resolution, workspace.install_path())?
.with_manifest(manifest)
.with_conflicting_groups(workspace.conflicting_groups())
.with_supported_environments(
environments
.cloned()
@ -800,6 +802,16 @@ impl ValidatedLock {
return Ok(Self::Versions(lock));
}
// If the conflicting group config has changed, we have to perform a clean resolution.
if &workspace.conflicting_groups() != lock.conflicting_groups() {
debug!(
"Ignoring existing lockfile due to change in conflicting groups: `{:?}` vs. `{:?}`",
workspace.conflicting_groups(),
lock.conflicting_groups(),
);
return Ok(Self::Versions(lock));
}
// If the user provided at least one index URL (from the command line, or from a configuration
// file), don't use the existing lockfile if it references any registries that are no longer
// included in the current configuration.

View file

@ -19,10 +19,10 @@ use uv_distribution_types::{
use uv_fs::{Simplified, CWD};
use uv_git::ResolvedRepositoryReference;
use uv_installer::{SatisfiesResult, SitePackages};
use uv_normalize::{GroupName, PackageName, DEV_DEPENDENCIES};
use uv_normalize::{ExtraName, GroupName, PackageName, DEV_DEPENDENCIES};
use uv_pep440::{Version, VersionSpecifiers};
use uv_pep508::MarkerTreeContents;
use uv_pypi_types::Requirement;
use uv_pypi_types::{ConflictingGroupList, ConflictingGroups, Requirement};
use uv_python::{
EnvironmentPreference, Interpreter, InvalidEnvironmentKind, PythonDownloads, PythonEnvironment,
PythonInstallation, PythonPreference, PythonRequest, PythonVariant, PythonVersionFile,
@ -80,6 +80,17 @@ pub(crate) enum ProjectError {
#[error("The current Python platform is not compatible with the lockfile's supported environments: {0}")]
LockedPlatformIncompatibility(String),
#[error(
"The requested extras ({}) are incompatible with the declared conflicting extra: {{{}}}",
_1.iter().map(|extra| format!("`{extra}`")).collect::<Vec<String>>().join(", "),
_0
.iter()
.map(|group| format!("`{}[{}]`", group.package(), group.extra()))
.collect::<Vec<String>>()
.join(", "),
)]
ExtraIncompatibility(ConflictingGroups, Vec<ExtraName>),
#[error("The requested interpreter resolved to Python {0}, which is incompatible with the project's Python requirement: `{1}`")]
RequestedPythonProjectIncompatibility(Version, RequiresPython),
@ -1091,6 +1102,7 @@ pub(crate) async fn resolve_environment<'a>(
Some(tags),
ResolverEnvironment::specific(marker_env),
python_requirement,
ConflictingGroupList::empty(),
&client,
&flat_index,
&state.index,
@ -1433,6 +1445,7 @@ pub(crate) async fn update_environment(
Some(tags),
ResolverEnvironment::specific(marker_env.clone()),
python_requirement,
ConflictingGroupList::empty(),
&client,
&flat_index,
&state.index,

View file

@ -15,7 +15,7 @@ use uv_configuration::{
use uv_dispatch::BuildDispatch;
use uv_distribution_types::{DirectorySourceDist, Dist, Index, ResolvedDist, SourceDist};
use uv_installer::SitePackages;
use uv_normalize::PackageName;
use uv_normalize::{ExtraName, PackageName};
use uv_pep508::{MarkerTree, Requirement, VersionOrUrl};
use uv_pypi_types::{
LenientRequirement, ParsedArchiveUrl, ParsedGitUrl, ParsedUrl, VerbatimParsedUrl,
@ -278,6 +278,23 @@ pub(super) async fn do_sync(
));
}
// Validate that we aren't trying to install extras that are
// declared as conflicting.
let conflicting_groups = target.lock().conflicting_groups();
for groups in conflicting_groups.iter() {
let conflicting = groups
.iter()
.filter(|group| extras.contains(group.extra()))
.map(|group| group.extra().clone())
.collect::<Vec<ExtraName>>();
if conflicting.len() >= 2 {
return Err(ProjectError::ExtraIncompatibility(
groups.clone(),
conflicting,
));
}
}
// Determine the markers to use for resolution.
let marker_env = venv.interpreter().resolver_marker_environment();

View file

@ -24,6 +24,7 @@ use uv_cli::{PythonCommand, PythonNamespace, ToolCommand, ToolNamespace, TopLeve
#[cfg(feature = "self-update")]
use uv_cli::{SelfCommand, SelfNamespace, SelfUpdateArgs};
use uv_fs::CWD;
use uv_pypi_types::ConflictingGroupList;
use uv_requirements::RequirementsSource;
use uv_scripts::{Pep723Item, Pep723Metadata, Pep723Script};
use uv_settings::{Combine, FilesystemOptions, Options};
@ -332,6 +333,7 @@ async fn run(mut cli: Cli) -> Result<ExitStatus> {
args.constraints_from_workspace,
args.overrides_from_workspace,
args.environments,
ConflictingGroupList::empty(),
args.settings.extras,
args.settings.output_file.as_deref(),
args.settings.resolution,

File diff suppressed because it is too large Load diff

View file

@ -191,7 +191,7 @@ fn invalid_pyproject_toml_option_unknown_field() -> Result<()> {
|
2 | unknown = "field"
| ^^^^^^^
unknown field `unknown`, expected one of `native-tls`, `offline`, `no-cache`, `cache-dir`, `preview`, `python-preference`, `python-downloads`, `concurrent-downloads`, `concurrent-builds`, `concurrent-installs`, `index`, `index-url`, `extra-index-url`, `no-index`, `find-links`, `index-strategy`, `keyring-provider`, `allow-insecure-host`, `resolution`, `prerelease`, `dependency-metadata`, `config-settings`, `no-build-isolation`, `no-build-isolation-package`, `exclude-newer`, `link-mode`, `compile-bytecode`, `no-sources`, `upgrade`, `upgrade-package`, `reinstall`, `reinstall-package`, `no-build`, `no-build-package`, `no-binary`, `no-binary-package`, `publish-url`, `trusted-publishing`, `pip`, `cache-keys`, `override-dependencies`, `constraint-dependencies`, `environments`, `workspace`, `sources`, `managed`, `package`, `default-groups`, `dev-dependencies`
unknown field `unknown`, expected one of `native-tls`, `offline`, `no-cache`, `cache-dir`, `preview`, `python-preference`, `python-downloads`, `concurrent-downloads`, `concurrent-builds`, `concurrent-installs`, `index`, `index-url`, `extra-index-url`, `no-index`, `find-links`, `index-strategy`, `keyring-provider`, `allow-insecure-host`, `resolution`, `prerelease`, `dependency-metadata`, `config-settings`, `no-build-isolation`, `no-build-isolation-package`, `exclude-newer`, `link-mode`, `compile-bytecode`, `no-sources`, `upgrade`, `upgrade-package`, `reinstall`, `reinstall-package`, `no-build`, `no-build-package`, `no-binary`, `no-binary-package`, `publish-url`, `trusted-publishing`, `pip`, `cache-keys`, `override-dependencies`, `constraint-dependencies`, `environments`, `conflicting-groups`, `workspace`, `sources`, `managed`, `package`, `default-groups`, `dev-dependencies`
Resolved in [TIME]
Audited in [TIME]

View file

@ -3031,6 +3031,102 @@ fn resolve_both() -> anyhow::Result<()> {
Ok(())
}
/// Tests that errors when parsing `conflicting-groups` are reported.
///
/// Two invalid shapes are exercised: a set with only a single entry,
/// and an entirely empty set. Both must be rejected at parse time with
/// a schema error (a conflict set needs at least two members to mean
/// anything).
#[test]
fn invalid_conflicting_groups() -> anyhow::Result<()> {
let context = TestContext::new("3.12");
let pyproject = context.temp_dir.child("pyproject.toml");
// Write in `pyproject.toml` schema and test the singleton case.
pyproject.write_str(indoc::indoc! {r#"
[project]
name = "example"
version = "0.0.0"
requires-python = ">=3.12"
[tool.uv]
conflicting-groups = [
[{extra = "dev"}],
]
"#})?;
// The file should be rejected for violating the schema.
// Exit code 2 indicates a usage/configuration error, not a resolver failure.
uv_snapshot!(context.filters(), add_shared_args(context.lock()), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: Failed to parse: `pyproject.toml`
Caused by: TOML parse error at line 7, column 22
|
7 | conflicting-groups = [
| ^
Each set of conflicting groups must have at least two entries, but found only one
"###
);
// Now test the empty case.
pyproject.write_str(indoc::indoc! {r#"
[project]
name = "example"
version = "0.0.0"
requires-python = ">=3.12"
[tool.uv]
conflicting-groups = [[]]
"#})?;
// The file should be rejected for violating the schema.
uv_snapshot!(context.filters(), add_shared_args(context.lock()), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
error: Failed to parse: `pyproject.toml`
Caused by: TOML parse error at line 7, column 22
|
7 | conflicting-groups = [[]]
| ^^^^
Each set of conflicting groups must have at least two entries, but found none
"###
);
Ok(())
}
/// Tests that valid `conflicting-groups` are parsed okay.
///
/// A well-formed configuration — one conflict set with two distinct
/// extras — should be accepted and `uv lock` should succeed.
#[test]
fn valid_conflicting_groups() -> anyhow::Result<()> {
let context = TestContext::new("3.12");
let pyproject = context.temp_dir.child("pyproject.toml");
// Write in `pyproject.toml` schema.
pyproject.write_str(indoc::indoc! {r#"
[project]
name = "example"
version = "0.0.0"
requires-python = ">=3.12"
[tool.uv]
conflicting-groups = [
[{extra = "x1"}, {extra = "x2"}],
]
"#})?;
// Locking succeeds: the conflict declaration is accepted silently.
uv_snapshot!(context.filters(), add_shared_args(context.lock()), @r###"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
Resolved 1 package in [TIME]
"###
);
Ok(())
}
/// Read from a `--config-file` command line argument.
#[test]
#[cfg_attr(
@ -3229,7 +3325,7 @@ fn resolve_config_file() -> anyhow::Result<()> {
|
1 | [project]
| ^^^^^^^
unknown field `project`, expected one of `native-tls`, `offline`, `no-cache`, `cache-dir`, `preview`, `python-preference`, `python-downloads`, `concurrent-downloads`, `concurrent-builds`, `concurrent-installs`, `index`, `index-url`, `extra-index-url`, `no-index`, `find-links`, `index-strategy`, `keyring-provider`, `allow-insecure-host`, `resolution`, `prerelease`, `dependency-metadata`, `config-settings`, `no-build-isolation`, `no-build-isolation-package`, `exclude-newer`, `link-mode`, `compile-bytecode`, `no-sources`, `upgrade`, `upgrade-package`, `reinstall`, `reinstall-package`, `no-build`, `no-build-package`, `no-binary`, `no-binary-package`, `publish-url`, `trusted-publishing`, `pip`, `cache-keys`, `override-dependencies`, `constraint-dependencies`, `environments`, `workspace`, `sources`, `managed`, `package`, `default-groups`, `dev-dependencies`
unknown field `project`, expected one of `native-tls`, `offline`, `no-cache`, `cache-dir`, `preview`, `python-preference`, `python-downloads`, `concurrent-downloads`, `concurrent-builds`, `concurrent-installs`, `index`, `index-url`, `extra-index-url`, `no-index`, `find-links`, `index-strategy`, `keyring-provider`, `allow-insecure-host`, `resolution`, `prerelease`, `dependency-metadata`, `config-settings`, `no-build-isolation`, `no-build-isolation-package`, `exclude-newer`, `link-mode`, `compile-bytecode`, `no-sources`, `upgrade`, `upgrade-package`, `reinstall`, `reinstall-package`, `no-build`, `no-build-package`, `no-binary`, `no-binary-package`, `publish-url`, `trusted-publishing`, `pip`, `cache-keys`, `override-dependencies`, `constraint-dependencies`, `environments`, `conflicting-groups`, `workspace`, `sources`, `managed`, `package`, `default-groups`, `dev-dependencies`
"###
);