Bump MSRV to Rust 1.80 (#13826)

Micha Reiser authored on 2024-10-20 10:55:36 +02:00, committed by GitHub
parent 075e378b0f
commit 27c50bebec
42 changed files with 110 additions and 133 deletions
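
Every hunk below applies the same mechanical substitution: the `once_cell` crate is dropped in favor of the equivalents the standard library now provides — `std::sync::LazyLock` and `std::cell::LazyCell` (stabilized in Rust 1.80, hence the MSRV bump) plus the longer-stable `std::sync::OnceLock` and `std::cell::OnceCell`. A minimal sketch of the pattern, reusing the `HASH_NUMBER` regex that appears in one of the hunks below (the `main` wrapper and the sample input are illustrative additions, not part of the change):

```rust
// Before: once_cell 1.x
// use once_cell::sync::Lazy;
// static HASH_NUMBER: Lazy<Regex> = Lazy::new(|| Regex::new(r"#\d").unwrap());

// After: std::sync::LazyLock, available since Rust 1.80.
use std::sync::LazyLock;

use regex::Regex;

static HASH_NUMBER: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"#\d").unwrap());

fn main() {
    // The closure runs exactly once, on first dereference, even across threads.
    assert!(HASH_NUMBER.is_match("see issue #42"));
}
```

Because the std API is a near drop-in replacement, most hunks only touch the `use` line and the type/constructor names.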

Cargo.lock (generated)

@@ -2136,7 +2136,6 @@ version = "0.0.0"
 dependencies = [
  "anyhow",
  "colored",
- "once_cell",
  "red_knot_python_semantic",
  "red_knot_vendored",
  "regex",
@@ -2154,7 +2153,6 @@ dependencies = [
 name = "red_knot_vendored"
 version = "0.0.0"
 dependencies = [
- "once_cell",
  "path-slash",
  "ruff_db",
  "walkdir",
@@ -2382,7 +2380,6 @@ dependencies = [
  "codspeed-criterion-compat",
  "criterion",
  "mimalloc",
- "once_cell",
  "rayon",
  "red_knot_python_semantic",
  "red_knot_workspace",
@@ -2515,7 +2512,6 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "clap",
- "once_cell",
  "red_knot_python_semantic",
  "ruff_cache",
  "ruff_db",
@@ -2560,7 +2556,6 @@ dependencies = [
  "log",
  "memchr",
  "natord",
- "once_cell",
  "path-absolutize",
  "pathdiff",
  "pep440_rs 0.6.6",
@@ -2616,7 +2611,6 @@ version = "0.0.0"
 dependencies = [
  "anyhow",
  "itertools 0.13.0",
- "once_cell",
  "rand",
  "ruff_diagnostics",
  "ruff_source_file",
@@ -2638,7 +2632,6 @@ dependencies = [
  "compact_str",
  "is-macro",
  "itertools 0.13.0",
- "once_cell",
  "ruff_cache",
  "ruff_macros",
  "ruff_python_trivia",
@@ -2664,7 +2657,6 @@ dependencies = [
 name = "ruff_python_codegen"
 version = "0.0.0"
 dependencies = [
- "once_cell",
  "ruff_python_ast",
  "ruff_python_literal",
  "ruff_python_parser",
@@ -2682,7 +2674,6 @@ dependencies = [
  "insta",
  "itertools 0.13.0",
  "memchr",
- "once_cell",
  "regex",
  "ruff_cache",
  "ruff_formatter",
@@ -2843,7 +2834,6 @@ name = "ruff_source_file"
 version = "0.0.0"
 dependencies = [
  "memchr",
- "once_cell",
  "ruff_text_size",
  "serde",
 ]


@@ -4,7 +4,7 @@ resolver = "2"
 [workspace.package]
 edition = "2021"
-rust-version = "1.76"
+rust-version = "1.80"
 homepage = "https://docs.astral.sh/ruff"
 documentation = "https://docs.astral.sh/ruff"
 repository = "https://github.com/astral-sh/ruff"
@@ -101,7 +101,6 @@ memchr = { version = "2.7.1" }
 mimalloc = { version = "0.1.39" }
 natord = { version = "1.0.9" }
 notify = { version = "6.1.1" }
-once_cell = { version = "1.19.0" }
 ordermap = { version = "0.5.0" }
 path-absolutize = { version = "3.1.1" }
 path-slash = { version = "0.2.1" }


@@ -21,7 +21,6 @@ ruff_text_size = { workspace = true }
 anyhow = { workspace = true }
 colored = { workspace = true }
-once_cell = { workspace = true }
 regex = { workspace = true }
 rustc-hash = { workspace = true }
 salsa = { workspace = true }


@@ -35,7 +35,6 @@
 //! ```
 use crate::db::Db;
-use once_cell::sync::Lazy;
 use regex::Regex;
 use ruff_db::files::File;
 use ruff_db::parsed::parsed_module;
@@ -45,6 +44,7 @@ use ruff_source_file::{LineIndex, Locator, OneIndexed};
 use ruff_text_size::{Ranged, TextRange};
 use smallvec::SmallVec;
 use std::ops::Deref;
+use std::sync::LazyLock;
 /// Diagnostic assertion comments in a single embedded file.
 #[derive(Debug)]
@@ -239,10 +239,10 @@ impl<'a> Deref for LineAssertions<'a> {
     }
 }
-static TYPE_RE: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^#\s*revealed:\s*(?<ty_display>.+?)\s*$").unwrap());
+static TYPE_RE: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^#\s*revealed:\s*(?<ty_display>.+?)\s*$").unwrap());
-static ERROR_RE: Lazy<Regex> = Lazy::new(|| {
+static ERROR_RE: LazyLock<Regex> = LazyLock::new(|| {
     Regex::new(
         r#"^#\s*error:(\s*(?<column>\d+))?(\s*\[(?<rule>.+?)\])?(\s*"(?<message>.+?)")?\s*$"#,
     )


@@ -1,7 +1,7 @@
-use once_cell::sync::Lazy;
 use regex::{Captures, Regex};
 use ruff_index::{newtype_index, IndexVec};
 use rustc_hash::{FxHashMap, FxHashSet};
+use std::sync::LazyLock;
 /// Parse the Markdown `source` as a test suite with given `title`.
 pub(crate) fn parse<'s>(title: &'s str, source: &'s str) -> anyhow::Result<MarkdownTestSuite<'s>> {
@@ -135,12 +135,12 @@ pub(crate) struct EmbeddedFile<'s> {
 /// Matches an arbitrary amount of whitespace (including newlines), followed by a sequence of `#`
 /// characters, followed by a title heading, followed by a newline.
-static HEADER_RE: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^(\s*\n)*(?<level>#+)\s+(?<title>.+)\s*\n").unwrap());
+static HEADER_RE: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^(\s*\n)*(?<level>#+)\s+(?<title>.+)\s*\n").unwrap());
 /// Matches a code block fenced by triple backticks, possibly with language and `key=val`
 /// configuration items following the opening backticks (in the "tag string" of the code block).
-static CODE_RE: Lazy<Regex> = Lazy::new(|| {
+static CODE_RE: LazyLock<Regex> = LazyLock::new(|| {
     Regex::new(r"^```(?<lang>\w+)(?<config>( +\S+)*)\s*\n(?<code>(.|\n)*?)\n?```\s*\n").unwrap()
 });


@@ -12,7 +12,6 @@ license = { workspace = true }
 [dependencies]
 ruff_db = { workspace = true }
-once_cell = { workspace = true }
 zip = { workspace = true }
 [build-dependencies]


@@ -1,14 +1,13 @@
-use once_cell::sync::Lazy;
 use ruff_db::vendored::VendoredFileSystem;
+use std::sync::LazyLock;
 // The file path here is hardcoded in this crate's `build.rs` script.
 // Luckily this crate will fail to build if this file isn't available at build time.
 static TYPESHED_ZIP_BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/zipped_typeshed.zip"));
 pub fn file_system() -> &'static VendoredFileSystem {
-    static VENDORED_TYPESHED_STUBS: Lazy<VendoredFileSystem> =
-        Lazy::new(|| VendoredFileSystem::new_static(TYPESHED_ZIP_BYTES).unwrap());
+    static VENDORED_TYPESHED_STUBS: LazyLock<VendoredFileSystem> =
+        LazyLock::new(|| VendoredFileSystem::new_static(TYPESHED_ZIP_BYTES).unwrap());
     &VENDORED_TYPESHED_STUBS
 }


@@ -39,7 +39,6 @@ harness = false
 [dependencies]
 codspeed-criterion-compat = { workspace = true, default-features = false, optional = true }
 criterion = { workspace = true, default-features = false }
-once_cell = { workspace = true }
 rayon = { workspace = true }
 rustc-hash = { workspace = true }
 serde = { workspace = true }


@@ -82,7 +82,7 @@ impl TestFile {
     }
 }
-static TARGET_DIR: once_cell::sync::Lazy<PathBuf> = once_cell::sync::Lazy::new(|| {
+static TARGET_DIR: std::sync::LazyLock<PathBuf> = std::sync::LazyLock::new(|| {
     cargo_target_directory().unwrap_or_else(|| PathBuf::from("target"))
 });


@@ -20,7 +20,6 @@ ruff_python_parser = { workspace = true }
 anyhow = { workspace = true }
 clap = { workspace = true, optional = true }
-once_cell = { workspace = true }
 salsa = { workspace = true }
 schemars = { workspace = true, optional = true }
 serde = { workspace = true, optional = true }


@@ -7,12 +7,11 @@ use ruff_db::system::{OsSystem, System, SystemPathBuf};
 use ruff_db::vendored::{VendoredFileSystem, VendoredFileSystemBuilder};
 use ruff_db::{Db as SourceDb, Upcast};
-static EMPTY_VENDORED: once_cell::sync::Lazy<VendoredFileSystem> =
-    once_cell::sync::Lazy::new(|| {
-        let mut builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
-        builder.add_file("stdlib/VERSIONS", "\n").unwrap();
-        builder.finish().unwrap()
-    });
+static EMPTY_VENDORED: std::sync::LazyLock<VendoredFileSystem> = std::sync::LazyLock::new(|| {
+    let mut builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
+    builder.add_file("stdlib/VERSIONS", "\n").unwrap();
+    builder.finish().unwrap()
+});
 #[salsa::db]
 #[derive(Default)]


@@ -46,10 +46,9 @@ libcst = { workspace = true }
 log = { workspace = true }
 memchr = { workspace = true }
 natord = { workspace = true }
-once_cell = { workspace = true }
 path-absolutize = { workspace = true, features = [
     "once_cell_cache",
     "use_unix_paths_on_wasm",
 ] }
 pathdiff = { workspace = true }
 pep440_rs = { workspace = true, features = ["serde"] }


@@ -1,4 +1,5 @@
 use std::borrow::Cow;
+use std::cell::LazyCell;
 use std::ops::Deref;
 use std::path::Path;
@@ -440,7 +441,7 @@ fn diagnostics_to_messages(
     locator: &Locator,
     directives: &Directives,
 ) -> Vec<Message> {
-    let file = once_cell::unsync::Lazy::new(|| {
+    let file = LazyCell::new(|| {
         let mut builder =
             SourceFileBuilder::new(path.to_string_lossy().as_ref(), locator.contents());
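
The hunk above covers the single-threaded variant of the same pattern: `once_cell::unsync::Lazy` becomes `std::cell::LazyCell`, which avoids synchronization when the value never crosses threads. A hedged sketch of that defer-until-needed idea (the names and the branch condition here are illustrative; in the real code the deferred value is a `SourceFile` that is only built if a diagnostic actually needs it):

```rust
use std::cell::LazyCell;

fn main() {
    // LazyCell is the !Sync counterpart of LazyLock: no locking, so it can
    // only be used on one thread, but the initializer still runs at most once.
    let expensive = LazyCell::new(|| {
        println!("building the value…");
        (1..=100u64).sum::<u64>()
    });

    // The initializer only runs if this branch is taken.
    if std::env::args().count() > 1 {
        println!("sum = {}", *expensive);
    }
}
```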


@@ -1,12 +1,11 @@
 use std::fmt::{Display, Formatter, Write};
 use std::path::{Path, PathBuf};
-use std::sync::Mutex;
+use std::sync::{LazyLock, Mutex};
 use anyhow::Result;
 use colored::Colorize;
 use fern;
 use log::Level;
-use once_cell::sync::Lazy;
 use ruff_python_parser::{ParseError, ParseErrorType};
 use rustc_hash::FxHashSet;
@@ -16,7 +15,7 @@ use crate::fs;
 use crate::source_kind::SourceKind;
 use ruff_notebook::Notebook;
-pub static IDENTIFIERS: Lazy<Mutex<Vec<&'static str>>> = Lazy::new(Mutex::default);
+pub static IDENTIFIERS: LazyLock<Mutex<Vec<&'static str>>> = LazyLock::new(Mutex::default);
 /// Warn a user once, with uniqueness determined by the given ID.
 #[macro_export]
@@ -35,7 +34,7 @@ macro_rules! warn_user_once_by_id {
     };
 }
-pub static MESSAGES: Lazy<Mutex<FxHashSet<String>>> = Lazy::new(Mutex::default);
+pub static MESSAGES: LazyLock<Mutex<FxHashSet<String>>> = LazyLock::new(Mutex::default);
 /// Warn a user once, if warnings are enabled, with uniqueness determined by the content of the
 /// message.


@@ -1,6 +1,5 @@
 use std::collections::HashMap;
+use std::sync::LazyLock;
-use once_cell::sync::Lazy;
 /// Returns the redirect target for the given code.
 pub(crate) fn get_redirect_target(code: &str) -> Option<&'static str> {
@@ -13,7 +12,7 @@ pub(crate) fn get_redirect(code: &str) -> Option<(&'static str, &'static str)> {
     REDIRECTS.get_key_value(code).map(|(k, v)| (*k, *v))
 }
-static REDIRECTS: Lazy<HashMap<&'static str, &'static str>> = Lazy::new(|| {
+static REDIRECTS: LazyLock<HashMap<&'static str, &'static str>> = LazyLock::new(|| {
     HashMap::from_iter([
         // The following are here because we don't yet have the many-to-one mapping enabled.
         ("SIM111", "SIM110"),


@@ -1,29 +1,28 @@
 /// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py)
 use aho_corasick::AhoCorasick;
 use itertools::Itertools;
-use once_cell::sync::Lazy;
 use regex::{Regex, RegexSet};
 use ruff_python_parser::parse_module;
 use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
 use ruff_text_size::TextSize;
+use std::sync::LazyLock;
-static CODE_INDICATORS: Lazy<AhoCorasick> = Lazy::new(|| {
+static CODE_INDICATORS: LazyLock<AhoCorasick> = LazyLock::new(|| {
     AhoCorasick::new([
         "(", ")", "[", "]", "{", "}", ":", "=", "%", "return", "break", "continue", "import",
     ])
     .unwrap()
 });
-static ALLOWLIST_REGEX: Lazy<Regex> = Lazy::new(|| {
+static ALLOWLIST_REGEX: LazyLock<Regex> = LazyLock::new(|| {
     Regex::new(
         r"^(?i)(?:pylint|pyright|noqa|nosec|region|endregion|type:\s*ignore|fmt:\s*(on|off)|isort:\s*(on|off|skip|skip_file|split|dont-add-imports(:\s*\[.*?])?)|mypy:|SPDX-License-Identifier:|(?:en)?coding[:=][ \t]*([-_.a-zA-Z0-9]+))",
     ).unwrap()
 });
-static HASH_NUMBER: Lazy<Regex> = Lazy::new(|| Regex::new(r"#\d").unwrap());
+static HASH_NUMBER: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"#\d").unwrap());
-static POSITIVE_CASES: Lazy<RegexSet> = Lazy::new(|| {
+static POSITIVE_CASES: LazyLock<RegexSet> = LazyLock::new(|| {
     RegexSet::new([
         // Keywords
         r"^(?:elif\s+.*\s*:.*|else\s*:.*|try\s*:.*|finally\s*:.*|except.*:.*|case\s+.*\s*:.*)$",


@@ -1,10 +1,10 @@
-use once_cell::sync::Lazy;
 use regex::Regex;
 use ruff_python_ast::{self as ast, Expr};
+use std::sync::LazyLock;
 use ruff_python_semantic::SemanticModel;
-static PASSWORD_CANDIDATE_REGEX: Lazy<Regex> = Lazy::new(|| {
+static PASSWORD_CANDIDATE_REGEX: LazyLock<Regex> = LazyLock::new(|| {
     Regex::new(r"(^|_)(?i)(pas+wo?r?d|pass(phrase)?|pwd|token|secrete?)($|_)").unwrap()
 });


@@ -1,5 +1,7 @@
-use once_cell::sync::Lazy;
+use std::sync::LazyLock;
 use regex::Regex;
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
 use ruff_python_ast::str::raw_contents;
@@ -9,7 +11,7 @@ use ruff_text_size::Ranged;
 use crate::checkers::ast::Checker;
-static SQL_REGEX: Lazy<Regex> = Lazy::new(|| {
+static SQL_REGEX: LazyLock<Regex> = LazyLock::new(|| {
     Regex::new(r"(?i)\b(select\s+.*\s+from\s|delete\s+from\s|(insert|replace)\s+.*\s+values\s|update\s+.*\s+set\s)")
         .unwrap()
 });


@@ -1,11 +1,13 @@
 //! Settings for the `flake8-copyright` plugin.
-use once_cell::sync::Lazy;
-use regex::Regex;
 use std::fmt::{Display, Formatter};
+use std::sync::LazyLock;
+use regex::Regex;
+use ruff_macros::CacheKey;
 use crate::display_settings;
-use ruff_macros::CacheKey;
 #[derive(Debug, Clone, CacheKey)]
 pub struct Settings {
@@ -14,8 +16,8 @@ pub struct Settings {
     pub min_file_size: usize,
 }
-pub static COPYRIGHT: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"(?i)Copyright\s+((?:\(C\)|©)\s+)?\d{4}((-|,\s)\d{4})*").unwrap());
+pub static COPYRIGHT: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"(?i)Copyright\s+((?:\(C\)|©)\s+)?\d{4}((-|,\s)\d{4})*").unwrap());
 impl Default for Settings {
     fn default() -> Self {


@@ -1,7 +1,7 @@
-use once_cell::sync::Lazy;
 use regex::Regex;
 use ruff_python_trivia::CommentRanges;
 use ruff_source_file::Locator;
+use std::sync::LazyLock;
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
@@ -49,8 +49,8 @@ pub(crate) fn type_comment_in_stub(
     }
 }
-static TYPE_COMMENT_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^#\s*type:\s*([^#]+)(\s*#.*?)?$").unwrap());
+static TYPE_COMMENT_REGEX: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^#\s*type:\s*([^#]+)(\s*#.*?)?$").unwrap());
-static TYPE_IGNORE_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^#\s*type:\s*ignore([^#]+)?(\s*#.*?)?$").unwrap());
+static TYPE_IGNORE_REGEX: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^#\s*type:\s*ignore([^#]+)?(\s*#.*?)?$").unwrap());


@@ -1,8 +1,8 @@
-use once_cell::sync::Lazy;
 use regex::RegexSet;
 use ruff_python_trivia::CommentRanges;
 use ruff_source_file::Locator;
 use ruff_text_size::{TextLen, TextRange, TextSize};
+use std::sync::LazyLock;
 use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix, Violation};
 use ruff_macros::{derive_message_formats, violation};
@@ -222,7 +222,7 @@ impl Violation for MissingSpaceAfterTodoColon {
     }
 }
-static ISSUE_LINK_REGEX_SET: Lazy<RegexSet> = Lazy::new(|| {
+static ISSUE_LINK_REGEX_SET: LazyLock<RegexSet> = LazyLock::new(|| {
     RegexSet::new([
         r"^#\s*(http|https)://.*", // issue link
         r"^#\s*\d+$", // issue code - like "003"


@@ -1,5 +1,5 @@
-use once_cell::sync::Lazy;
 use regex::Regex;
+use std::sync::LazyLock;
 use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
 use ruff_macros::{derive_message_formats, violation};
@@ -98,8 +98,8 @@ impl AlwaysFixableViolation for NoBlankLineAfterFunction {
     }
 }
-static INNER_FUNCTION_OR_CLASS_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^\s+(?:(?:class|def|async def)\s|@)").unwrap());
+static INNER_FUNCTION_OR_CLASS_REGEX: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^\s+(?:(?:class|def|async def)\s|@)").unwrap());
 /// D201, D202
 pub(crate) fn blank_before_after_function(checker: &mut Checker, docstring: &Docstring) {


@@ -1,5 +1,6 @@
+use std::sync::LazyLock;
 use imperative::Mood;
-use once_cell::sync::Lazy;
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
@@ -12,7 +13,7 @@ use crate::docstrings::Docstring;
 use crate::rules::pydocstyle::helpers::normalize_word;
 use crate::rules::pydocstyle::settings::Settings;
-static MOOD: Lazy<Mood> = Lazy::new(Mood::new);
+static MOOD: LazyLock<Mood> = LazyLock::new(Mood::new);
 /// ## What it does
 /// Checks for docstring first lines that are not in an imperative mood.


@@ -1,7 +1,7 @@
 use itertools::Itertools;
-use once_cell::sync::Lazy;
 use regex::Regex;
 use rustc_hash::FxHashSet;
+use std::sync::LazyLock;
 use ruff_diagnostics::{AlwaysFixableViolation, Violation};
 use ruff_diagnostics::{Diagnostic, Edit, Fix};
@@ -1846,8 +1846,8 @@ fn missing_args(checker: &mut Checker, docstring: &Docstring, docstrings_args: &
 }
 // See: `GOOGLE_ARGS_REGEX` in `pydocstyle/checker.py`.
-static GOOGLE_ARGS_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^\s*(\*?\*?\w+)\s*(\(.*?\))?\s*:(\r\n|\n)?\s*.+").unwrap());
+static GOOGLE_ARGS_REGEX: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^\s*(\*?\*?\w+)\s*(\(.*?\))?\s*:(\r\n|\n)?\s*.+").unwrap());
 fn args_section(context: &SectionContext) -> FxHashSet<String> {
     let mut following_lines = context.following_lines().peekable();


@@ -1,7 +1,7 @@
 use anyhow::{anyhow, Result};
 use memchr::memchr_iter;
-use once_cell::sync::Lazy;
 use regex::Regex;
+use std::sync::LazyLock;
 use ruff_diagnostics::{Diagnostic, Violation};
 use ruff_macros::{derive_message_formats, violation};
@@ -102,8 +102,8 @@ pub(crate) fn blanket_type_ignore(
 // Match, e.g., `[attr-defined]` or `[attr-defined, misc]`.
 // See: https://github.com/python/mypy/blob/b43e0d34247a6d1b3b9d9094d184bbfcb9808bb9/mypy/fastparse.py#L327
-static TYPE_IGNORE_TAG_PATTERN: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^\s*\[(?P<codes>[^]#]*)]\s*(#.*)?$").unwrap());
+static TYPE_IGNORE_TAG_PATTERN: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^\s*\[(?P<codes>[^]#]*)]\s*(#.*)?$").unwrap());
 /// Parse the optional `[...]` tag in a `# type: ignore[...]` comment.
 ///


@@ -1,8 +1,9 @@
-use once_cell::sync::Lazy;
 use regex::{Captures, Regex};
 use std::borrow::Cow;
+use std::sync::LazyLock;
-static CURLY_BRACES: Lazy<Regex> = Lazy::new(|| Regex::new(r"(\\N\{[^}]+})|([{}])").unwrap());
+static CURLY_BRACES: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"(\\N\{[^}]+})|([{}])").unwrap());
 pub(super) fn curly_escape(text: &str) -> Cow<'_, str> {
     // Match all curly braces. This will include named unicode escapes (like
@@ -20,7 +21,8 @@ pub(super) fn curly_escape(text: &str) -> Cow<'_, str> {
     })
 }
-static DOUBLE_CURLY_BRACES: Lazy<Regex> = Lazy::new(|| Regex::new(r"((\{\{)|(\}\}))").unwrap());
+static DOUBLE_CURLY_BRACES: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"((\{\{)|(\}\}))").unwrap());
 pub(super) fn curly_unescape(text: &str) -> Cow<'_, str> {
     // Match all double curly braces and replace with a single


@@ -1,6 +1,6 @@
-use once_cell::sync::Lazy;
 use ruff_python_ast::{self as ast, Expr};
 use rustc_hash::FxHashMap;
+use std::sync::LazyLock;
 use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
 use ruff_macros::{derive_message_formats, violation};
@@ -57,7 +57,7 @@ impl AlwaysFixableViolation for DeprecatedUnittestAlias {
     }
 }
-static DEPRECATED_ALIASES: Lazy<FxHashMap<&'static str, &'static str>> = Lazy::new(|| {
+static DEPRECATED_ALIASES: LazyLock<FxHashMap<&'static str, &'static str>> = LazyLock::new(|| {
     FxHashMap::from_iter([
         ("assertAlmostEquals", "assertAlmostEqual"),
         ("assertEquals", "assertEqual"),


@@ -1,7 +1,7 @@
 use anyhow::{anyhow, Result};
 use libcst_native::{Arg, Expression};
-use once_cell::sync::Lazy;
 use regex::Regex;
+use std::sync::LazyLock;
 use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
 use ruff_macros::{derive_message_formats, violation};
@@ -127,8 +127,8 @@ fn is_sequential(indices: &[usize]) -> bool {
 // An opening curly brace, followed by any integer, followed by any text,
 // followed by a closing brace.
-static FORMAT_SPECIFIER: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"\{(?P<int>\d+)(?P<fmt>.*?)}").unwrap());
+static FORMAT_SPECIFIER: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"\{(?P<int>\d+)(?P<fmt>.*?)}").unwrap());
 /// Remove the explicit positional indices from a format string.
 fn remove_specifiers<'a>(value: &mut Expression<'a>, arena: &'a typed_arena::Arena<String>) {


@@ -1,5 +1,5 @@
-use once_cell::sync::Lazy;
 use regex::Regex;
+use std::sync::LazyLock;
 use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
 use ruff_macros::{derive_message_formats, violation};
@@ -42,8 +42,8 @@ impl AlwaysFixableViolation for UTF8EncodingDeclaration {
 }
 // Regex from PEP263.
-static CODING_COMMENT_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"^[ \t\f]*#.*?coding[:=][ \t]*utf-?8").unwrap());
+static CODING_COMMENT_REGEX: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new(r"^[ \t\f]*#.*?coding[:=][ \t]*utf-?8").unwrap());
 /// UP009
 pub(crate) fn unnecessary_coding_comment(


@@ -2,13 +2,12 @@
 //! command-line options. Structure is optimized for internal usage, as opposed
 //! to external visibility or parsing.
-use std::fmt::{Display, Formatter};
-use std::path::{Path, PathBuf};
-use once_cell::sync::Lazy;
 use path_absolutize::path_dedot;
 use regex::Regex;
 use rustc_hash::FxHashSet;
+use std::fmt::{Display, Formatter};
+use std::path::{Path, PathBuf};
+use std::sync::LazyLock;
 use crate::codes::RuleCodePrefix;
 use ruff_macros::CacheKey;
@@ -355,8 +354,8 @@ pub const DEFAULT_SELECTORS: &[RuleSelector] = &[
 pub const TASK_TAGS: &[&str] = &["TODO", "FIXME", "XXX"];
-pub static DUMMY_VARIABLE_RGX: Lazy<Regex> =
-    Lazy::new(|| Regex::new("^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$").unwrap());
+pub static DUMMY_VARIABLE_RGX: LazyLock<Regex> =
+    LazyLock::new(|| Regex::new("^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$").unwrap());
 impl LinterSettings {
     pub fn for_rule(rule_code: Rule) -> Self {


@@ -20,7 +20,6 @@ ruff_text_size = { workspace = true }
 anyhow = { workspace = true }
 itertools = { workspace = true }
-once_cell = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 serde_with = { workspace = true, default-features = false, features = ["macros"] }


@@ -1,15 +1,14 @@
+use itertools::Itertools;
+use rand::{Rng, SeedableRng};
+use serde::Serialize;
+use serde_json::error::Category;
 use std::cmp::Ordering;
 use std::collections::HashSet;
 use std::fs::File;
 use std::io::{BufReader, Cursor, Read, Seek, SeekFrom, Write};
 use std::path::Path;
+use std::sync::OnceLock;
 use std::{io, iter};
-use itertools::Itertools;
-use once_cell::sync::OnceCell;
-use rand::{Rng, SeedableRng};
-use serde::Serialize;
-use serde_json::error::Category;
 use thiserror::Error;
 use ruff_diagnostics::{SourceMap, SourceMarker};
@@ -63,7 +62,7 @@ pub struct Notebook {
     source_code: String,
     /// The index of the notebook. This is used to map between the concatenated
     /// source code and the original notebook.
-    index: OnceCell<NotebookIndex>,
+    index: OnceLock<NotebookIndex>,
     /// The raw notebook i.e., the deserialized version of JSON string.
     raw: RawNotebook,
     /// The offsets of each cell in the concatenated source code. This includes
@@ -194,7 +193,7 @@ impl Notebook {
         Ok(Self {
             raw: raw_notebook,
-            index: OnceCell::new(),
+            index: OnceLock::new(),
             // The additional newline at the end is to maintain consistency for
            // all cells. These newlines will be removed before updating the
            // source code with the transformed content. Refer `update_cell_content`.
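
For values that are set once at runtime rather than computed by a fixed closure, the diff swaps `once_cell::sync::OnceCell` for `std::sync::OnceLock` (stable since Rust 1.70), as in the `Notebook::index` field above. A rough sketch of that caching shape — the real `NotebookIndex` payload is stood in for by a `Vec<usize>`, and `build_index` is a made-up helper:

```rust
use std::sync::OnceLock;

struct Notebook {
    // Lazily populated cache: get_or_init runs its closure at most once and
    // every later call returns the cached reference.
    index: OnceLock<Vec<usize>>,
}

impl Notebook {
    fn index(&self) -> &Vec<usize> {
        self.index.get_or_init(build_index)
    }
}

// Hypothetical stand-in for the real index construction.
fn build_index() -> Vec<usize> {
    vec![0, 10, 25]
}

fn main() {
    let nb = Notebook { index: OnceLock::new() };
    assert_eq!(nb.index().len(), 3);
    assert_eq!(nb.index().len(), 3); // second call hits the cache
}
```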


@@ -23,7 +23,6 @@ aho-corasick = { workspace = true }
 bitflags = { workspace = true }
 is-macro = { workspace = true }
 itertools = { workspace = true }
-once_cell = { workspace = true }
 rustc-hash = { workspace = true }
 schemars = { workspace = true, optional = true }
 serde = { workspace = true, optional = true }


@@ -1,7 +1,6 @@
-use std::fmt;
 use aho_corasick::{AhoCorasick, AhoCorasickKind, Anchored, Input, MatchKind, StartKind};
-use once_cell::sync::Lazy;
+use std::fmt;
+use std::sync::LazyLock;
 use ruff_text_size::{TextLen, TextRange};
@@ -205,7 +204,7 @@ pub fn raw_contents_range(contents: &str) -> Option<TextRange> {
 }
 /// An [`AhoCorasick`] matcher for string and byte literal prefixes.
-static PREFIX_MATCHER: Lazy<AhoCorasick> = Lazy::new(|| {
+static PREFIX_MATCHER: LazyLock<AhoCorasick> = LazyLock::new(|| {
     AhoCorasick::builder()
         .start_kind(StartKind::Anchored)
         .match_kind(MatchKind::LeftmostLongest)


@@ -20,7 +20,6 @@ ruff_python_parser = { workspace = true }
 ruff_source_file = { workspace = true }
 ruff_text_size = { workspace = true }
-once_cell = { workspace = true }
 [lints]
 workspace = true


@@ -1,9 +1,8 @@
 //! Detect code style from Python source code.
+use std::cell::OnceCell;
 use std::ops::Deref;
-use once_cell::unsync::OnceCell;
 use ruff_python_ast::str::Quote;
 use ruff_python_parser::{Token, TokenKind, Tokens};
 use ruff_source_file::{find_newline, LineEnding, Locator};


@@ -28,7 +28,6 @@ clap = { workspace = true }
 countme = { workspace = true }
 itertools = { workspace = true }
 memchr = { workspace = true }
-once_cell = { workspace = true }
 regex = { workspace = true }
 rustc-hash = { workspace = true }
 serde = { workspace = true, optional = true }


@@ -2,15 +2,15 @@
 // "reStructuredText."
 #![allow(clippy::doc_markdown)]
+use itertools::Itertools;
 use std::cmp::Ordering;
+use std::sync::LazyLock;
 use std::{borrow::Cow, collections::VecDeque};
-use itertools::Itertools;
+use regex::Regex;
 use ruff_formatter::printer::SourceMapGeneration;
 use ruff_python_ast::{str::Quote, StringFlags};
 use ruff_python_trivia::CommentRanges;
-use {once_cell::sync::Lazy, regex::Regex};
 use {
     ruff_formatter::{write, FormatOptions, IndentStyle, LineWidth, Printed},
     ruff_python_trivia::{is_python_whitespace, PythonWhitespace},
@@ -1075,7 +1075,7 @@ impl<'src> CodeExampleRst<'src> {
         // [directives]: https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#directives
         // [Pygments lexer names]: https://pygments.org/docs/lexers/
         // [code-block]: https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#directive-code-block
-        static DIRECTIVE_START: Lazy<Regex> = Lazy::new(|| {
+        static DIRECTIVE_START: LazyLock<Regex> = LazyLock::new(|| {
             Regex::new(
                 r"(?m)^\s*\.\. \s*(?i:code-block|sourcecode)::\s*(?i:python|py|python3|py3)$",
             )
@@ -1320,7 +1320,7 @@ impl<'src> CodeExampleMarkdown<'src> {
     ///
     /// [fenced code block]: https://spec.commonmark.org/0.30/#fenced-code-blocks
     fn new(original: InputDocstringLine<'src>) -> Option<CodeExampleMarkdown<'src>> {
-        static FENCE_START: Lazy<Regex> = Lazy::new(|| {
+        static FENCE_START: LazyLock<Regex> = LazyLock::new(|| {
             Regex::new(
                 r"(?xm)
                 ^


@@ -1,6 +1,6 @@
+use std::sync::LazyLock;
 use {
     itertools::Either::{Left, Right},
-    once_cell::sync::Lazy,
     regex::Regex,
 };
@@ -60,7 +60,7 @@ impl Transformer for Normalizer {
     }
     fn visit_string_literal(&self, string_literal: &mut ast::StringLiteral) {
-        static STRIP_DOC_TESTS: Lazy<Regex> = Lazy::new(|| {
+        static STRIP_DOC_TESTS: LazyLock<Regex> = LazyLock::new(|| {
             Regex::new(
                 r"(?mx)
                 (
@@ -75,14 +75,14 @@ impl Transformer for Normalizer {
            )
            .unwrap()
        });
-        static STRIP_RST_BLOCKS: Lazy<Regex> = Lazy::new(|| {
+        static STRIP_RST_BLOCKS: LazyLock<Regex> = LazyLock::new(|| {
            // This is kind of unfortunate, but it's pretty tricky (likely
            // impossible) to detect a reStructuredText block with a simple
            // regex. So we just look for the start of a block and remove
            // everything after it. Talk about a hammer.
            Regex::new(r"::(?s:.*)").unwrap()
        });
-        static STRIP_MARKDOWN_BLOCKS: Lazy<Regex> = Lazy::new(|| {
+        static STRIP_MARKDOWN_BLOCKS: LazyLock<Regex> = LazyLock::new(|| {
            // This covers more than valid Markdown blocks, but that's OK.
            Regex::new(r"(```|~~~)\p{any}*(```|~~~|$)").unwrap()
        });


@@ -16,7 +16,6 @@ license = { workspace = true }
 ruff_text_size = { workspace = true }
 memchr = { workspace = true }
-once_cell = { workspace = true }
 serde = { workspace = true, optional = true }
 [dev-dependencies]


@@ -1,6 +1,6 @@
 use std::cmp::Ordering;
 use std::fmt::{Debug, Display, Formatter};
-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
@@ -134,9 +134,9 @@ impl SourceFileBuilder {
     /// Consumes `self` and returns the [`SourceFile`].
     pub fn finish(self) -> SourceFile {
         let index = if let Some(index) = self.index {
-            once_cell::sync::OnceCell::with_value(index)
+            OnceLock::from(index)
         } else {
-            once_cell::sync::OnceCell::new()
+            OnceLock::new()
         };
         SourceFile {
@@ -218,7 +218,7 @@ impl Ord for SourceFile {
 struct SourceFileInner {
     name: Box<str>,
     code: Box<str>,
-    line_index: once_cell::sync::OnceCell<LineIndex>,
+    line_index: OnceLock<LineIndex>,
 }
 impl PartialEq for SourceFileInner {


@@ -1,10 +1,9 @@
 //! Struct used to efficiently slice source code at (row, column) Locations.
-use std::ops::Add;
 use memchr::{memchr2, memrchr2};
-use once_cell::unsync::OnceCell;
 use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
+use std::cell::OnceCell;
+use std::ops::Add;
 use crate::newlines::find_newline;
 use crate::{LineIndex, OneIndexed, SourceCode, SourceLocation};
@@ -23,10 +22,10 @@ impl<'a> Locator<'a> {
         }
     }
-    pub const fn with_index(contents: &'a str, index: LineIndex) -> Self {
+    pub fn with_index(contents: &'a str, index: LineIndex) -> Self {
         Self {
            contents,
-            index: OnceCell::with_value(index),
+            index: OnceCell::from(index),
        }
    }