Implement Ranged on more structs (#6639)

## Summary

I noticed some inconsistencies around uses of `.range.start()`, structs
that have a `TextRange` field but don't implement `Ranged`, etc.

## Test Plan

`cargo test`
This commit is contained in:
Charlie Marsh 2023-08-17 11:22:39 -04:00 committed by GitHub
parent a70807e1e1
commit db1c556508
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
66 changed files with 221 additions and 176 deletions

View file

@ -327,7 +327,7 @@ impl Format<PyFormatContext<'_>> for FormatComment<'_> {
}
let start = slice.start() + start_offset;
let end = slice.range().end() - trailing_whitespace_len;
let end = slice.end() - trailing_whitespace_len;
write!(
f,

View file

@ -445,7 +445,7 @@ fn handle_own_line_comment_between_branches<'a>(
// It depends on the indentation level of the comment if it is a leading comment for the
// following branch or if it a trailing comment of the previous body's last statement.
let comment_indentation = indentation_at_offset(comment.slice().range().start(), locator)
let comment_indentation = indentation_at_offset(comment.slice().start(), locator)
.unwrap_or_default()
.len();
@ -529,7 +529,7 @@ fn handle_own_line_comment_after_branch<'a>(
// We only care about the length because indentations with mixed spaces and tabs are only valid if
// the indent-level doesn't depend on the tab width (the indent level must be the same if the tab width is 1 or 8).
let comment_indentation = indentation_at_offset(comment.slice().range().start(), locator)
let comment_indentation = indentation_at_offset(comment.slice().start(), locator)
.unwrap_or_default()
.len();
@ -1314,7 +1314,7 @@ fn handle_comprehension_comment<'a>(
// b in c
// ]
// ```
if comment.slice().end() < comprehension.target.range().start() {
if comment.slice().end() < comprehension.target.start() {
return if is_own_line {
// own line comments are correctly assigned as leading the target
CommentPlacement::Default(comment)
@ -1325,10 +1325,7 @@ fn handle_comprehension_comment<'a>(
}
let in_token = find_only_token_in_range(
TextRange::new(
comprehension.target.range().end(),
comprehension.iter.range().start(),
),
TextRange::new(comprehension.target.end(), comprehension.iter.start()),
SimpleTokenKind::In,
locator,
);
@ -1361,7 +1358,7 @@ fn handle_comprehension_comment<'a>(
// c
// ]
// ```
if comment.slice().start() < comprehension.iter.range().start() {
if comment.slice().start() < comprehension.iter.start() {
return if is_own_line {
CommentPlacement::Default(comment)
} else {
@ -1370,7 +1367,7 @@ fn handle_comprehension_comment<'a>(
};
}
let mut last_end = comprehension.iter.range().end();
let mut last_end = comprehension.iter.end();
for if_node in &comprehension.ifs {
// ```python
@ -1391,7 +1388,7 @@ fn handle_comprehension_comment<'a>(
// ]
// ```
let if_token = find_only_token_in_range(
TextRange::new(last_end, if_node.range().start()),
TextRange::new(last_end, if_node.start()),
SimpleTokenKind::If,
locator,
);
@ -1400,11 +1397,11 @@ fn handle_comprehension_comment<'a>(
return CommentPlacement::dangling(if_node, comment);
}
} else if if_token.start() < comment.slice().start()
&& comment.slice().start() < if_node.range().start()
&& comment.slice().start() < if_node.start()
{
return CommentPlacement::dangling(if_node, comment);
}
last_end = if_node.range().end();
last_end = if_node.end();
}
CommentPlacement::Default(comment)

View file

@ -1,4 +1,4 @@
use ruff_python_ast::{Expr, ExprSlice, ExprUnaryOp, Ranged, UnaryOp};
use ruff_python_ast::{Expr, ExprSlice, ExprUnaryOp, UnaryOp};
use ruff_text_size::TextRange;
use ruff_formatter::prelude::{hard_line_break, line_suffix_boundary, space, text};
@ -20,13 +20,18 @@ impl FormatNodeRule<ExprSlice> for FormatExprSlice {
fn fmt_fields(&self, item: &ExprSlice, f: &mut PyFormatter) -> FormatResult<()> {
// `[lower:upper:step]`
let ExprSlice {
range,
lower,
upper,
step,
range,
} = item;
let (first_colon, second_colon) = find_colons(f.context().source(), *range, lower, upper)?;
let (first_colon, second_colon) = find_colons(
f.context().source(),
*range,
lower.as_deref(),
upper.as_deref(),
)?;
// Handle comment placement
// In placements.rs, we marked comments for None nodes as dangling and associated all others
@ -36,14 +41,14 @@ impl FormatNodeRule<ExprSlice> for FormatExprSlice {
let comments = f.context().comments().clone();
let slice_dangling_comments = comments.dangling_comments(item.as_any_node_ref());
// Put the dangling comments (where the nodes are missing) into buckets
let first_colon_partition_index = slice_dangling_comments
.partition_point(|x| x.slice().start() < first_colon.range.start());
let first_colon_partition_index =
slice_dangling_comments.partition_point(|x| x.slice().start() < first_colon.start());
let (dangling_lower_comments, dangling_upper_step_comments) =
slice_dangling_comments.split_at(first_colon_partition_index);
let (dangling_upper_comments, dangling_step_comments) =
if let Some(second_colon) = &second_colon {
let second_colon_partition_index = dangling_upper_step_comments
.partition_point(|x| x.slice().start() < second_colon.range.start());
.partition_point(|x| x.slice().start() < second_colon.start());
dangling_upper_step_comments.split_at(second_colon_partition_index)
} else {
// Without a second colon the remaining dangling comments belong between the first
@ -153,27 +158,27 @@ impl FormatNodeRule<ExprSlice> for FormatExprSlice {
pub(crate) fn find_colons(
contents: &str,
range: TextRange,
lower: &Option<Box<Expr>>,
upper: &Option<Box<Expr>>,
lower: Option<&Expr>,
upper: Option<&Expr>,
) -> FormatResult<(SimpleToken, Option<SimpleToken>)> {
let after_lower = lower
.as_ref()
.map_or(range.start(), |lower| lower.range().end());
.map_or(range.start(), ruff_python_ast::Ranged::end);
let mut tokens = SimpleTokenizer::new(contents, TextRange::new(after_lower, range.end()))
.skip_trivia()
.skip_while(|token| token.kind == SimpleTokenKind::RParen);
let first_colon = tokens.next().ok_or(FormatError::syntax_error(
"Din't find any token for slice first colon",
"Didn't find any token for slice first colon",
))?;
if first_colon.kind != SimpleTokenKind::Colon {
return Err(FormatError::syntax_error(
"slice first colon token was not a colon",
"Slice first colon token was not a colon",
));
}
let after_upper = upper
.as_ref()
.map_or(first_colon.end(), |upper| upper.range().end());
.map_or(first_colon.end(), ruff_python_ast::Ranged::end);
let mut tokens = SimpleTokenizer::new(contents, TextRange::new(after_upper, range.end()))
.skip_trivia()
.skip_while(|token| token.kind == SimpleTokenKind::RParen);
@ -206,6 +211,7 @@ fn is_simple_expr(expr: &Expr) -> bool {
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ExprSliceCommentSection {
Lower,
Upper,
@ -229,21 +235,22 @@ pub(crate) fn assign_comment_in_slice(
expr_slice: &ExprSlice,
) -> ExprSliceCommentSection {
let ExprSlice {
range,
lower,
upper,
step: _,
range,
} = expr_slice;
let (first_colon, second_colon) = find_colons(contents, *range, lower, upper)
.expect("SyntaxError when trying to parse slice");
let (first_colon, second_colon) =
find_colons(contents, *range, lower.as_deref(), upper.as_deref())
.expect("SyntaxError when trying to parse slice");
if comment.start() < first_colon.range.start() {
if comment.start() < first_colon.start() {
ExprSliceCommentSection::Lower
} else {
// We are to the right of the first colon
if let Some(second_colon) = second_colon {
if comment.start() < second_colon.range.start() {
if comment.start() < second_colon.start() {
ExprSliceCommentSection::Upper
} else {
ExprSliceCommentSection::Step

View file

@ -91,7 +91,7 @@ fn is_operand_parenthesized(unary: &ExprUnaryOp, source: &str) -> bool {
UnaryOp::USub => '-'.text_len(),
};
let trivia_range = TextRange::new(unary.range.start() + operator_len, unary.operand.start());
let trivia_range = TextRange::new(unary.start() + operator_len, unary.operand.start());
if let Some(token) = SimpleTokenizer::new(source, trivia_range)
.skip_trivia()

View file

@ -269,6 +269,12 @@ struct FormatStringPart {
is_raw_string: bool,
}
// Expose this part's source span via the shared `Ranged` trait so callers can
// use the generic `start()` / `end()` helpers instead of reaching into the
// `range` field directly (the stated goal of this change).
impl Ranged for FormatStringPart {
fn range(&self) -> TextRange {
self.range
}
}
impl FormatStringPart {
fn new(range: TextRange, quoting: Quoting, locator: &Locator, quote_style: QuoteStyle) -> Self {
let string_content = locator.slice(range);
@ -317,10 +323,10 @@ impl Format<PyFormatContext<'_>> for FormatStringPart {
write!(f, [self.prefix, self.preferred_quotes])?;
match normalized {
Cow::Borrowed(_) => {
source_text_slice(self.range, contains_newlines).fmt(f)?;
source_text_slice(self.range(), contains_newlines).fmt(f)?;
}
Cow::Owned(normalized) => {
dynamic_text(&normalized, Some(self.range.start())).fmt(f)?;
dynamic_text(&normalized, Some(self.start())).fmt(f)?;
}
}
self.preferred_quotes.fmt(f)
@ -781,12 +787,12 @@ fn format_docstring(string_part: &FormatStringPart, f: &mut PyFormatter) -> Form
let locator = f.context().locator();
// Black doesn't change the indentation of docstrings that contain an escaped newline
if locator.slice(string_part.range).contains("\\\n") {
if locator.slice(string_part.range()).contains("\\\n") {
return string_part.fmt(f);
}
let (normalized, _) = normalize_string(
locator.slice(string_part.range),
locator.slice(string_part.range()),
string_part.preferred_quotes,
string_part.is_raw_string,
);
@ -799,13 +805,13 @@ fn format_docstring(string_part: &FormatStringPart, f: &mut PyFormatter) -> Form
write!(
f,
[
source_position(string_part.range.start()),
source_position(string_part.start()),
string_part.prefix,
string_part.preferred_quotes
]
)?;
// We track where in the source docstring we are (in source code byte offsets)
let mut offset = string_part.range.start();
let mut offset = string_part.start();
// The first line directly after the opening quotes has different rules than the rest, mainly
// that we remove all leading whitespace as there's no indentation
@ -892,7 +898,7 @@ fn format_docstring(string_part: &FormatStringPart, f: &mut PyFormatter) -> Form
f,
[
string_part.preferred_quotes,
source_position(string_part.range.end())
source_position(string_part.end())
]
)
}

View file

@ -39,7 +39,7 @@ impl FormatNodeRule<Comprehension> for FormatComprehension {
let dangling_item_comments = comments.dangling_comments(item);
let (before_target_comments, before_in_comments) = dangling_item_comments.split_at(
dangling_item_comments
.partition_point(|comment| comment.slice().end() < target.range().start()),
.partition_point(|comment| comment.slice().end() < target.start()),
);
let trailing_in_comments = comments.dangling_comments(iter);

View file

@ -79,7 +79,7 @@ fn is_match_case_pattern_parenthesized(
) -> FormatResult<bool> {
let mut tokenizer = SimpleTokenizer::new(
context.source(),
TextRange::new(case.range().start(), pattern.range().start()),
TextRange::new(case.start(), pattern.start()),
)
.skip_trivia();

View file

@ -416,7 +416,7 @@ pub(crate) fn find_parameter_separators(
debug_assert!(star.kind() == SimpleTokenKind::Star, "{star:?}");
Some(ParameterSeparator {
preceding_end: parameters.range.start(),
preceding_end: parameters.start(),
separator: star.range,
following_start: first_keyword_argument.start(),
})

View file

@ -6,7 +6,7 @@ use ruff_formatter::write;
use ruff_formatter::FormatResult;
use ruff_python_ast::node::AstNode;
use ruff_python_ast::TypeParams;
use ruff_python_ast::{Ranged, TypeParams};
#[derive(Default)]
pub struct FormatTypeParams;
@ -26,7 +26,7 @@ impl FormatNodeRule<TypeParams> for FormatTypeParams {
write!(f, [trailing_comments(dangling_comments)])?;
let items = format_with(|f| {
f.join_comma_separated(item.range.end())
f.join_comma_separated(item.end())
.nodes(item.type_params.iter())
.finish()
});