fix(formatting): ran rustfmt with nightly features to organize imports

Elijah Potter 2024-11-07 19:15:24 -07:00
parent 003177df3a
commit bffac10ebf
37 changed files with 68 additions and 75 deletions
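
The reordered imports in the hunks below follow the layout that nightly-only rustfmt options produce: `use` items from the same crate are merged into a single statement, and imports are grouped as standard library, then external crates, then crate-local (`self`/`super`/`crate`) paths. The exact configuration is not recorded in this commit; the following rustfmt.toml sketch shows settings that would yield this layout, with the option values being an assumption rather than something taken from the repository:

# rustfmt.toml — assumed settings, not part of this commit
imports_granularity = "Crate"       # merge imports from the same crate into one `use` statement
group_imports = "StdExternalCrate"  # order groups: std, external crates, then self/super/crate imports

Both options are unstable, so formatting has to run on the nightly toolchain, for example via `cargo +nightly fmt`.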

View file

@@ -3,14 +3,12 @@ use std::collections::VecDeque;
use std::fmt::Display;
use harper_data::{
FatToken, Lrc, NumberSuffix, Punctuation, Token, TokenKind, TokenStringExt, VecExt,
FatToken, Lrc, NumberSuffix, Punctuation, Span, Token, TokenKind, TokenStringExt, VecExt,
};
use paste::paste;
use harper_data::Span;
use harper_parsing::{Markdown, Parser, PlainEnglish};
use harper_patterns::{PatternExt, RepeatingPattern, SequencePattern};
use harper_spell::{Dictionary, FstDictionary};
use paste::paste;
/// A document containing some amount of lexed and parsed English text.
#[derive(Debug, Clone)]
@@ -544,10 +542,10 @@ impl Display for Document {
#[cfg(test)]
mod tests {
use harper_data::Span;
use itertools::Itertools;
use super::Document;
use harper_data::Span;
fn assert_condensed_contractions(text: &str, final_tok_count: usize) {
let document = Document::new_plain_english_curated(text);

View file

@@ -1,10 +1,10 @@
use std::collections::VecDeque;
use harper_data::{Lrc, Span, Token, TokenKind, VecExt};
use harper_patterns::{PatternExt, SequencePattern};
use itertools::Itertools;
use super::Parser;
use harper_patterns::{PatternExt, SequencePattern};
/// A parser that wraps any other parser to collapse token strings that match
/// the pattern `word_word` or `word-word`.
@@ -52,9 +52,10 @@ impl Parser for CollapseIdentifiers {
#[cfg(test)]
mod tests {
use super::*;
use harper_parsing::{PlainEnglish, StrParser};
use super::*;
#[test]
fn matches_kebab() {
let source: Vec<_> = "kebab-case".chars().collect();

View file

@@ -3,9 +3,9 @@ use harper_parsing::{Markdown, Parser};
/// A Harper parser for Git commit files.
///
/// In this crate since the only place it's needed at the moment is the Gestalt parser.
/// If it needs to be used _without_ the rest of the Gestalt parser, feel free to move it to it's
/// own crate.
/// In this crate since the only place it's needed at the moment is the Gestalt
/// parser. If it needs to be used _without_ the rest of the Gestalt parser,
/// feel free to move it to it's own crate.
pub struct GitCommitParser;
impl Parser for GitCommitParser {

View file

@@ -10,11 +10,13 @@ use harper_data::Token;
use harper_html::HtmlParser;
use harper_parsing::{Markdown, Parser, PlainEnglish};
/// A [`Parser`](harper_parsing::Parser) that combines a variety of parsers to singlehandedly
/// support a significant variety of programming languages and file formats.
/// A [`Parser`](harper_parsing::Parser) that combines a variety of parsers to
/// singlehandedly support a significant variety of programming languages and
/// file formats.
///
/// For now, it just allows us to provide a filetype and get a parser.
/// Eventually, we plan to support nesting (like linting the comments inside Markdown code blocks).
/// Eventually, we plan to support nesting (like linting the comments inside
/// Markdown code blocks).
pub struct GestaltParser {
inner: Box<dyn Parser>,
}
@@ -55,7 +57,8 @@ impl GestaltParser {
/// Note to contributors: try to keep this in sync with
/// [`Self::new_from_language_id`].
///
/// This operates in _addition_ to the similarly named function in the [`CommentParser`].
/// This operates in _addition_ to the similarly named function in the
/// [`CommentParser`].
fn filename_to_filetype(path: &Path) -> Option<&'static str> {
Some(match path.extension()?.to_str()? {
"md" => "markdown",

View file

@@ -1,8 +1,9 @@
use std::path::Path;
use harper_core::Document;
use harper_gestalt::GestaltParser;
use harper_linting::{LintGroup, LintGroupConfig, Linter};
use harper_spell::FstDictionary;
use std::path::Path;
/// Creates a unit test checking that the linting of a source file in
/// `tests_sources` produces the expected number of lints.

View file

@@ -1,12 +1,11 @@
use harper_data::Token;
use harper_data::TokenStringExt;
use harper_data::{Token, TokenStringExt};
use harper_parsing::Parser;
use crate::is_likely_english;
use crate::Dictionary;
use crate::{is_likely_english, Dictionary};
/// A [`Parser`](harper_parsing::Parser) that wraps another, using heuristics to quickly redact paragraphs
/// of a document that aren't intended to be English text.
/// A [`Parser`](harper_parsing::Parser) that wraps another, using heuristics to
/// quickly redact paragraphs of a document that aren't intended to be English
/// text.
pub struct IsolateEnglish<D: Dictionary> {
inner: Box<dyn Parser>,
dict: D,
@@ -40,11 +39,11 @@ impl<D: Dictionary> Parser for IsolateEnglish<D> {
#[cfg(test)]
mod tests {
use harper_data::TokenStringExt;
use harper_parsing::PlainEnglish;
use harper_spell::FstDictionary;
use super::IsolateEnglish;
use crate::Document;
use harper_parsing::PlainEnglish;
/// Assert that the provided text contains _no_ chunks of valid English
fn assert_no_english(text: &str) {

View file

@@ -1,8 +1,7 @@
mod isolate_english;
use harper_data::{Token, TokenKind};
use harper_core::Document;
use harper_data::{Token, TokenKind};
use harper_spell::Dictionary;
pub use isolate_english::IsolateEnglish;
@@ -56,10 +55,11 @@ pub fn is_likely_english(toks: &[Token], source: &[char], dict: &impl Dictionary
#[cfg(test)]
mod tests {
use super::is_doc_likely_english;
use harper_core::Document;
use harper_spell::FstDictionary;
use super::is_doc_likely_english;
fn assert_not_english(source: &'static str) {
let dict = FstDictionary::curated();
let doc = Document::new_plain_english(source, &dict);

View file

@@ -1,10 +1,10 @@
use std::borrow::Cow;
use harper_core::Document;
use harper_data::TokenStringExt;
use itertools::Itertools;
use crate::{Lint, LintKind, Linter, Suggestion};
use harper_core::Document;
#[derive(Debug, Default)]
pub struct AnA;

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::TokenStringExt;
use crate::{Lint, LintKind, Linter};
use harper_core::Document;
#[derive(Debug, Default)]
pub struct AvoidCurses;

View file

@@ -1,7 +1,7 @@
use harper_data::{Token, TokenStringExt};
use harper_patterns::{Pattern, WordPatternGroup};
use super::{Lint, LintKind, PatternLinter};
use harper_patterns::{Pattern, WordPatternGroup};
pub struct BoringWords {
pattern: Box<dyn Pattern>,

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::{NumberSuffix, Span, TokenKind, TokenStringExt};
use super::{Lint, LintKind, Linter, Suggestion};
use harper_core::Document;
/// Detect and warn that the sentence is too long.
#[derive(Debug, Clone, Copy, Default)]

View file

@@ -1,8 +1,8 @@
use harper_data::{Token, TokenStringExt};
use harper_patterns::{Pattern, SequencePattern, WordPatternGroup};
use hashbrown::HashMap;
use super::{Lint, LintKind, PatternLinter, Suggestion};
use harper_patterns::{Pattern, SequencePattern, WordPatternGroup};
pub struct DotInitialisms {
pattern: Box<dyn Pattern>,

View file

@@ -39,9 +39,8 @@ impl Linter for EllipsisLength {
#[cfg(test)]
mod tests {
use crate::tests::{assert_lint_count, assert_suggestion_result};
use super::EllipsisLength;
use crate::tests::{assert_lint_count, assert_suggestion_result};
#[test]
fn allows_correct_ellipsis() {

View file

@@ -30,6 +30,7 @@ pub use boring_words::BoringWords;
pub use correct_number_suffix::CorrectNumberSuffix;
pub use dot_initialisms::DotInitialisms;
pub use ellipsis_length::EllipsisLength;
use harper_core::Document;
use harper_data::VecExt;
pub use linking_verbs::LinkingVerbs;
pub use lint::{Lint, LintKind, Suggestion};
@@ -49,8 +50,6 @@ pub use unclosed_quotes::UnclosedQuotes;
pub use use_genitive::UseGenitive;
pub use wrong_quotes::WrongQuotes;
use harper_core::Document;
#[cfg(not(feature = "concurrent"))]
pub trait Linter {
fn lint(&mut self, document: &Document) -> Vec<Lint>;
@@ -93,13 +92,12 @@ pub fn remove_overlaps(lints: &mut Vec<Lint>) {
#[cfg(test)]
mod tests {
use super::Linter;
use crate::remove_overlaps;
use crate::{LintGroup, LintGroupConfig};
use harper_core::Document;
use harper_spell::FstDictionary;
use super::Linter;
use crate::{remove_overlaps, LintGroup, LintGroupConfig};
#[test]
fn keeps_space_lint() {
let doc = Document::new_plain_english_curated("Ths tet");

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::TokenStringExt;
use super::{Lint, LintKind, Linter};
use harper_core::Document;
/// Detect and warn that the sentence is too long.
#[derive(Debug, Clone, Copy, Default)]

View file

@@ -1,3 +1,5 @@
use harper_core::Document;
use harper_spell::Dictionary;
use paste::paste;
use serde::{Deserialize, Serialize};
@@ -22,8 +24,6 @@ use super::unclosed_quotes::UnclosedQuotes;
use super::use_genitive::UseGenitive;
use super::wrong_quotes::WrongQuotes;
use super::{Lint, Linter};
use harper_core::Document;
use harper_spell::Dictionary;
macro_rules! create_lint_group_config {
($($linter:ident => $default:expr),*) => {

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::{Span, TokenStringExt};
use super::{Lint, LintKind, Linter};
use harper_core::Document;
/// Detect and warn that the sentence is too long.
#[derive(Debug, Clone, Copy, Default)]

View file

@@ -1,11 +1,10 @@
use harper_data::{Lrc, Token, TokenStringExt};
use harper_patterns::{Pattern, SequencePattern};
use hashbrown::HashSet;
use super::pattern_linter::PatternLinter;
use super::Suggestion;
use crate::Lint;
use crate::LintKind;
use harper_patterns::{Pattern, SequencePattern};
use crate::{Lint, LintKind};
/// Linter that checks if multiple pronouns are being used right after each
/// other. This is a common mistake to make during the revision process.

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::{Span, TokenKind, TokenStringExt};
use super::{Lint, LintKind, Linter, Suggestion};
use harper_core::Document;
/// Detect and warn that the sentence is too long.
#[derive(Debug, Clone, Copy, Default)]

View file

@@ -1,7 +1,7 @@
use harper_data::{Token, TokenStringExt};
use harper_patterns::Pattern;
use super::{Lint, Linter};
use harper_patterns::Pattern;
#[cfg(not(feature = "concurrent"))]
pub trait PatternLinter {

View file

@@ -1,7 +1,7 @@
use harper_data::{Token, TokenStringExt};
use harper_patterns::{Pattern, SequencePattern, WordPatternGroup};
use super::{Lint, LintKind, PatternLinter, Suggestion};
use harper_patterns::{Pattern, SequencePattern, WordPatternGroup};
pub struct RepeatedWords {
pattern: Box<dyn Pattern>,

View file

@@ -1,9 +1,9 @@
use harper_core::Document;
use harper_data::TokenStringExt;
use itertools::Itertools;
use super::lint::Suggestion;
use super::{Lint, LintKind, Linter};
use harper_core::Document;
#[derive(Debug, Clone, Copy, Default)]
pub struct SentenceCapitalization;

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::{Token, TokenKind, TokenStringExt};
use super::{Lint, LintKind, Linter, Suggestion};
use harper_core::Document;
#[derive(Debug, Default)]
pub struct Spaces;

View file

@@ -1,11 +1,11 @@
use harper_core::Document;
use harper_data::{CharString, TokenStringExt};
use harper_spell::{suggest_correct_spelling, Dictionary};
use hashbrown::HashMap;
use smallvec::ToSmallVec;
use super::lint::Suggestion;
use super::{Lint, LintKind, Linter};
use harper_core::Document;
use harper_spell::{suggest_correct_spelling, Dictionary};
pub struct SpellCheck<T>
where

View file

@@ -1,8 +1,7 @@
use harper_core::Document;
use harper_data::TokenStringExt;
use crate::Lint;
use crate::{LintKind, Linter, Suggestion};
use harper_core::Document;
use crate::{Lint, LintKind, Linter, Suggestion};
/// Linter that checks to make sure small integers (< one hundred) are spelled
/// out.

View file

@@ -1,7 +1,7 @@
use harper_data::{Lrc, Token};
use harper_patterns::{ConsumesRemainingPattern, Pattern, SequencePattern};
use super::{Lint, LintKind, PatternLinter};
use harper_patterns::{ConsumesRemainingPattern, Pattern, SequencePattern};
pub struct TerminatingConjunctions {
pattern: Box<dyn Pattern>,

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::{Punctuation, Quote, TokenKind};
use super::{Lint, LintKind, Linter};
use harper_core::Document;
#[derive(Debug, Clone, Copy, Default)]
pub struct UnclosedQuotes;

View file

@@ -1,9 +1,8 @@
use harper_data::{Lrc, Token};
use crate::Lint;
use crate::{LintKind, PatternLinter, Suggestion};
use harper_patterns::{EitherPattern, Pattern, SequencePattern, WordPatternGroup};
use crate::{Lint, LintKind, PatternLinter, Suggestion};
// Looks for places where the genitive case _isn't_ being used, and should be.
pub struct UseGenitive {
pattern: Box<dyn Pattern>,

View file

@@ -1,7 +1,7 @@
use harper_core::Document;
use harper_data::{Token, TokenStringExt};
use super::{Lint, Linter, Suggestion};
use harper_core::Document;
#[derive(Debug, Clone, Copy, Default)]
pub struct WrongQuotes;

View file

@@ -2,11 +2,10 @@ mod email_address;
mod hostname;
mod url;
use harper_data::{CharExt, Punctuation, Quote, TokenKind};
use harper_data::{CharExt, Punctuation, Quote, TokenKind, WordMetadata};
use url::lex_url;
use self::email_address::lex_email_address;
use harper_data::WordMetadata;
#[derive(Debug)]
pub struct FoundToken {

View file

@@ -73,9 +73,10 @@ impl Mask {
#[cfg(test)]
mod tests {
use crate::Mask;
use harper_data::Span;
use crate::Mask;
#[test]
fn bumps_existing() {
let mut mask = Mask::new_blank();

View file

@@ -33,9 +33,10 @@ impl Pattern for RepeatingPattern {
#[cfg(test)]
mod tests {
use harper_parsing::{Parser, PlainEnglish};
use super::RepeatingPattern;
use crate::{AnyPattern, Pattern};
use harper_parsing::{Parser, PlainEnglish};
#[test]
fn matches_anything() {

View file

@@ -138,11 +138,11 @@ impl Pattern for SequencePattern {
#[cfg(test)]
mod tests {
use harper_data::Lrc;
use harper_parsing::{Parser, PlainEnglish};
use hashbrown::HashSet;
use super::SequencePattern;
use crate::Pattern;
use harper_parsing::{Parser, PlainEnglish};
#[test]
fn matches_n_whitespace_tokens() {

View file

@@ -1,9 +1,8 @@
use harper_data::Token;
use harper_data::{CharString, Token};
use hashbrown::HashMap;
use super::naive_pattern_group::NaivePatternGroup;
use super::{Pattern, SequencePattern};
use harper_data::CharString;
/// A pattern collection to look for patterns that start with a specific
/// word.

View file

@@ -191,8 +191,7 @@ mod tests {
use itertools::Itertools;
use super::FstDictionary;
use crate::seq_to_normalized;
use crate::Dictionary;
use crate::{seq_to_normalized, Dictionary};
#[test]
fn fst_map_contains_all_in_full_dict() {

View file

@@ -1,12 +1,12 @@
use std::borrow::Cow;
use harper_data::{CharString, CharStringExt, WordMetadata};
use itertools::{Itertools, MinMaxResult};
pub use self::dictionary::Dictionary;
pub use self::fst_dictionary::FstDictionary;
pub use self::full_dictionary::FullDictionary;
pub use self::merged_dictionary::MergedDictionary;
use harper_data::{CharString, CharStringExt, WordMetadata};
mod dictionary;
mod fst_dictionary;
@@ -144,12 +144,11 @@ fn edit_distance_min_alloc(
mod tests {
use itertools::Itertools;
use crate::edit_distance_min_alloc;
use super::{
order_suggestions, seq_to_normalized, suggest_correct_spelling_str, Dictionary,
FstDictionary, FullDictionary,
};
use crate::edit_distance_min_alloc;
// A convenience method for these tests.
fn edit_distance(source: &[char], target: &[char]) -> u8 {

View file

@@ -5,8 +5,7 @@ use std::sync::Mutex;
use harper_core::Document;
use harper_data::{Lrc, Span as HarperSpan};
use harper_language_detection::{is_doc_likely_english, IsolateEnglish};
use harper_linting::{remove_overlaps, Linter};
use harper_linting::{LintGroup, LintGroupConfig};
use harper_linting::{remove_overlaps, LintGroup, LintGroupConfig, Linter};
use harper_parsing::{Markdown, PlainEnglish};
use harper_spell::FstDictionary;
use once_cell::sync::Lazy;