chore: a lot of documentation cleanups

Go through the `doc_markdown` lint everywhere, fixing code links and some grammar.
This commit is contained in:
Yuri Astrakhan 2025-07-02 23:21:33 -04:00
parent d04d0cd987
commit 1d39e75b89
47 changed files with 147 additions and 148 deletions

View file

@ -635,7 +635,6 @@ multiple_crate_versions = "allow" # 2314
missing_errors_doc = "allow" # 1504
missing_panics_doc = "allow" # 946
must_use_candidate = "allow" # 322
doc_markdown = "allow" # 267
match_same_arms = "allow" # 212
unnecessary_semicolon = "allow" # 156
redundant_closure_for_method_calls = "allow" # 133

View file

@ -188,7 +188,7 @@ struct InputHandle<R: FdReadable> {
/// Concrete enum of recognized file types.
///
/// *Note*: `cat`-ing a directory should result in an
/// CatError::IsDirectory
/// [`CatError::IsDirectory`]
enum InputType {
Directory,
File,

View file

@ -52,7 +52,7 @@ struct Options {
/// # Arguments
///
/// * `options` - CLI options for the assigning checksum algorithm
/// * `files` - A iterator of OsStr which is a bunch of files that are using for calculating checksum
/// * `files` - An iterator of [`OsStr`] which is a bunch of files that are used for calculating checksum
#[allow(clippy::cognitive_complexity)]
fn cksum<'a, I>(mut options: Options, files: I) -> UResult<()>
where

View file

@ -543,7 +543,7 @@ pub fn path_has_prefix(p1: &Path, p2: &Path) -> io::Result<bool> {
/// copied from the provided file. Otherwise, the new directory will have the default
/// attributes for the current user.
/// - This method excludes certain permissions if ownership or special mode bits could
/// potentially change. (See `test_dir_perm_race_with_preserve_mode_and_ownership``)
/// potentially change. (See `test_dir_perm_race_with_preserve_mode_and_ownership`)
/// - The `recursive` flag determines whether parent directories should be created
/// if they do not already exist.
// we need to allow unused_variable since `options` might be unused in non unix systems

View file

@ -48,11 +48,11 @@ mod platform;
#[derive(Debug, Error)]
pub enum CpError {
/// Simple io::Error wrapper
/// Simple [`io::Error`] wrapper
#[error("{0}")]
IoErr(#[from] io::Error),
/// Wrapper for io::Error with path context
/// Wrapper for [`io::Error`] with path context
#[error("{1}: {0}")]
IoErrContext(io::Error, String),
@ -65,11 +65,11 @@ pub enum CpError {
#[error("{}", get_message("cp-error-not-all-files-copied"))]
NotAllFilesCopied,
/// Simple walkdir::Error wrapper
/// Simple [`walkdir::Error`] wrapper
#[error("{0}")]
WalkDirErr(#[from] walkdir::Error),
/// Simple std::path::StripPrefixError wrapper
/// Simple [`StripPrefixError`] wrapper
#[error(transparent)]
StripPrefixError(#[from] StripPrefixError),
@ -84,9 +84,9 @@ pub enum CpError {
#[error("{0}")]
InvalidArgument(String),
/// All standard options are included as an an implementation
/// All standard options are included as an implementation
/// path, but those that are not implemented yet should return
/// a NotImplemented error.
/// a `NotImplemented` error.
#[error("{}", get_message_with_args("cp-error-option-not-implemented", HashMap::from([("option".to_string(), 0.to_string())])))]
NotImplemented(String),
@ -931,8 +931,8 @@ impl Attributes {
}
}
/// Set the field to Preserve::NO { explicit: true } if the corresponding field
/// in other is set to Preserve::Yes { .. }.
/// Set the field to `Preserve::No { explicit: true }` if the corresponding field
/// in other is set to `Preserve::Yes { .. }`.
pub fn diff(self, other: &Self) -> Self {
fn update_preserve_field(current: Preserve, other: Preserve) -> Preserve {
if matches!(other, Preserve::Yes { .. }) {
@ -1254,7 +1254,7 @@ impl Options {
}
impl TargetType {
/// Return TargetType required for `target`.
/// Return [`TargetType`] required for `target`.
///
/// Treat target as a dir if we have multiple sources or the target
/// exists and already is a directory
@ -1875,7 +1875,7 @@ fn context_for(src: &Path, dest: &Path) -> String {
}
/// Implements a simple backup copy for the destination file .
/// if is_dest_symlink flag is set to true dest will be renamed to backup_path
/// if the `is_dest_symlink` flag is set to true, dest will be renamed to `backup_path`
/// TODO: for the backup, should this function be replaced by `copy_file(...)`?
fn backup_dest(dest: &Path, backup_path: &Path, is_dest_symlink: bool) -> CopyResult<PathBuf> {
if is_dest_symlink {

View file

@ -29,10 +29,10 @@ enum CloneFallback {
/// Use [`std::fs::copy`].
FSCopy,
/// Use sparse_copy
/// Use [`sparse_copy`]
SparseCopy,
/// Use sparse_copy_without_hole
/// Use [`sparse_copy_without_hole`]
SparseCopyWithoutHole,
}
@ -43,9 +43,9 @@ enum CopyMethod {
SparseCopy,
/// Use [`std::fs::copy`].
FSCopy,
/// Default (can either be sparse_copy or FSCopy)
/// Default (can either be [`CopyMethod::SparseCopy`] or [`CopyMethod::FSCopy`])
Default,
/// Use sparse_copy_without_hole
/// Use [`sparse_copy_without_hole`]
SparseCopyWithoutHole,
}
@ -124,8 +124,8 @@ fn check_sparse_detection(source: &Path) -> Result<bool, std::io::Error> {
Ok(false)
}
/// Optimized sparse_copy, doesn't create holes for large sequences of zeros in non sparse_files
/// Used when --sparse=auto
/// Optimized [`sparse_copy`], doesn't create holes for large sequences of zeros in non `sparse_files`
/// Used when `--sparse=auto`
#[cfg(any(target_os = "linux", target_os = "android"))]
fn sparse_copy_without_hole<P>(source: P, dest: P) -> std::io::Result<()>
where
@ -175,7 +175,7 @@ where
Ok(())
}
/// Perform a sparse copy from one file to another.
/// Creates a holes for large sequences of zeros in non_sparse_files, used for --sparse=always
/// Creates holes for large sequences of zeros in `non_sparse_files`, used for `--sparse=always`
#[cfg(any(target_os = "linux", target_os = "android"))]
fn sparse_copy<P>(source: P, dest: P) -> std::io::Result<()>
where
@ -470,7 +470,7 @@ fn handle_reflink_never_sparse_never(source: &Path) -> Result<CopyDebug, std::io
}
/// Handles debug results when flags are "--reflink=auto" and "--sparse=never", files will be copied
/// through cloning them with fallback switching to std::fs::copy
/// through cloning them with fallback switching to [`std::fs::copy`]
fn handle_reflink_auto_sparse_never(source: &Path) -> Result<CopyDebug, std::io::Error> {
let mut copy_debug = CopyDebug {
offload: OffloadReflinkDebug::Unknown,

View file

@ -16,7 +16,7 @@ pub struct SplitName {
}
impl SplitName {
/// Creates a new SplitName with the given user-defined options:
/// Creates a new [`SplitName`] with the given user-defined options:
/// - `prefix_opt` specifies a prefix for all splits.
/// - `format_opt` specifies a custom format for the suffix part of the filename, using the
/// `sprintf` format notation.

View file

@ -476,8 +476,8 @@ fn set_system_datetime(date: Zoned) -> UResult<()> {
#[cfg(windows)]
/// System call to set date (Windows).
/// See here for more:
/// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-setsystemtime
/// https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-systemtime
/// * <https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-setsystemtime>
/// * <https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-systemtime>
fn set_system_datetime(date: Zoned) -> UResult<()> {
let system_time = SYSTEMTIME {
wYear: date.year() as u16,

View file

@ -37,7 +37,7 @@ const SI_BASES: [u128; 10] = [
const SI_SUFFIXES: [&str; 9] = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"];
/// A SuffixType determines whether the suffixes are 1000 or 1024 based.
/// A `SuffixType` determines whether the suffixes are 1000 or 1024 based.
#[derive(Clone, Copy)]
pub(crate) enum SuffixType {
Iec,

View file

@ -446,7 +446,7 @@ fn show_zero_multiplier_warning() {
);
}
/// Parse bytes using str::parse, then map error if needed.
/// Parse bytes using [`str::parse`], then map error if needed.
fn parse_bytes_only(s: &str, i: usize) -> Result<u64, ParseError> {
s[..i]
.parse()
@ -502,7 +502,7 @@ fn parse_bytes_no_x(full: &str, s: &str) -> Result<u64, ParseError> {
}
/// Parse byte and multiplier like 512, 5KiB, or 1G.
/// Uses uucore::parse_size, and adds the 'w' and 'c' suffixes which are mentioned
/// Uses [`uucore::parser::parse_size`], and adds the 'w' and 'c' suffixes which are mentioned
/// in dd's info page.
pub fn parse_bytes_with_opt_multiplier(s: &str) -> Result<u64, ParseError> {
// TODO On my Linux system, there seems to be a maximum block size of 4096 bytes:

View file

@ -40,8 +40,8 @@ const SI_BASES: [u128; 10] = [
1_000_000_000_000_000_000_000_000_000,
];
/// A SuffixType determines whether the suffixes are 1000 or 1024 based, and whether they are
/// intended for HumanReadable mode or not.
/// A `SuffixType` determines whether the suffixes are 1000 or 1024 based, and whether they are
/// intended for `HumanReadable` mode or not.
#[derive(Clone, Copy)]
pub(crate) enum SuffixType {
Iec,

View file

@ -48,7 +48,7 @@ pub(crate) enum FsError {
/// Check whether `mount` has been over-mounted.
///
/// `mount` is considered over-mounted if it there is an element in
/// `mounts` after mount that has the same mount_dir.
/// `mounts` after mount that has the same `mount_dir`.
#[cfg(not(windows))]
fn is_over_mounted(mounts: &[MountInfo], mount: &MountInfo) -> bool {
let last_mount_for_dir = mounts

View file

@ -297,7 +297,7 @@ impl<'a> RowFormatter<'a> {
}
}
/// A HeaderMode defines what header labels should be shown.
/// A `HeaderMode` defines what header labels should be shown.
pub(crate) enum HeaderMode {
Default,
// the user used -h or -H

View file

@ -294,7 +294,7 @@ pub fn uu_app() -> Command {
pub trait StrUtils {
/// Remove comments and trim whitespace
fn purify(&self) -> &Self;
/// Like split_whitespace() but only produce 2 components
/// Like `split_whitespace()` but only produce 2 parts
fn split_two(&self) -> (&str, &str);
fn fnmatch(&self, pattern: &str) -> bool;
}

View file

@ -69,8 +69,8 @@ fn is_echo_flag(arg: &OsString, echo_options: &mut EchoOptions) -> bool {
/// Processes command line arguments, separating flags from normal arguments
/// Returns:
/// - Vector of non-flag arguments
/// - trailing_newline: whether to print a trailing newline
/// - escape: whether to process escape sequences
/// - `trailing_newline`: whether to print a trailing newline
/// - `escape`: whether to process escape sequences
fn filter_echo_flags(args: impl uucore::Args) -> (Vec<OsString>, bool, bool) {
let mut result = Vec::new();
let mut echo_options = EchoOptions {

View file

@ -7,9 +7,9 @@
// licensed under the Apache License, Version 2.0 <LICENSE-APACHE>
// or the MIT license <LICENSE-MIT>, at your option.
//
//! Process command line according to parsing rules of original GNU env.
//! Process command line according to parsing rules of the original GNU env.
//! Even though it looks quite like a POSIX syntax, the original
//! "shell_words" implementation had to be adapted significantly.
//! `shell_words` implementation had to be adapted significantly.
//!
//! Apart from the grammar differences, there is a new feature integrated: $VARIABLE expansion.
//!

View file

@ -16,8 +16,8 @@ use crate::{
/// This class makes parsing and word collection more convenient.
///
/// It manages an "output" buffer that is automatically filled.
/// It provides "skip_one" and "take_one" that focus on
/// working with ASCII separators. Thus they will skip or take
/// It provides `skip_one` and `take_one` that focus on
/// working with ASCII separators. Thus, they will skip or take
/// all consecutive non-ascii char sequences at once.
pub struct StringExpander<'a> {
parser: StringParser<'a>,

View file

@ -25,7 +25,7 @@ pub enum ErrorType {
InternalError,
}
/// Provides a valid char or a invalid sequence of bytes.
/// Provides a valid char or an invalid sequence of bytes.
///
/// Invalid byte sequences can't be split in any meaningful way.
/// Thus, they need to be consumed as one piece.
@ -34,9 +34,9 @@ pub enum Chunk<'a> {
ValidSingleIntChar((char, NativeCharInt)),
}
/// This class makes parsing a OsString char by char more convenient.
/// This class makes parsing a [`std::ffi::OsString`] char by char more convenient.
///
/// It also allows to capturing of intermediate positions for later splitting.
/// It also allows capturing the intermediate positions for later splitting.
pub struct StringParser<'a> {
input: &'a NativeIntStr,
pointer: usize,

View file

@ -331,7 +331,7 @@ where
///
/// This method is not comprehensively checking all cases in which
/// a regular expression could be invalid; any cases not caught will
/// result in a [ExprError::InvalidRegexExpression] when passing the
/// result in a [`ExprError::InvalidRegexExpression`] when passing the
/// regular expression through the Oniguruma bindings. This method is
/// intended to just identify a few situations for which GNU coreutils
/// has specific error messages.

View file

@ -207,10 +207,10 @@ impl Iterator for FileLines<'_> {
}
}
/// A paragraph : a collection of FileLines that are to be formatted
/// A paragraph : a collection of [`FileLines`] that are to be formatted
/// plus info about the paragraph's indentation
///
/// We only retain the String from the FileLine; the other info
/// We only retain the String from the [`FileLine`]; the other info
/// is only there to help us in deciding how to merge lines into Paragraphs
#[derive(Debug)]
pub struct Paragraph {

View file

@ -57,7 +57,7 @@ struct Options<'a> {
///
/// # Returns
///
/// Returns a UResult of a tuple containing the algorithm name, the hasher instance, and
/// Returns a [`UResult`] of a tuple containing the algorithm name, the hasher instance, and
/// the output length in bits or an Err if multiple hash algorithms are specified or if a
/// required flag is missing.
#[allow(clippy::cognitive_complexity)]

View file

@ -82,10 +82,10 @@ impl TakeAllBuffer {
/// copied.
///
/// Algorithm for this function is as follows...
/// 1 - Chunks of the input file are read into a queue of TakeAllBuffer instances.
/// 1 - Chunks of the input file are read into a queue of [`TakeAllBuffer`] instances.
/// Chunks are read until at least we have enough data to write out the entire contents of the
/// first TakeAllBuffer in the queue whilst still retaining at least `n` bytes in the queue.
/// If we hit EoF at any point, stop reading.
/// first [`TakeAllBuffer`] in the queue whilst still retaining at least `n` bytes in the queue.
/// If we hit `EoF` at any point, stop reading.
/// 2 - Assess whether we managed to queue up greater-than `n` bytes. If not, we must be done, in
/// which case break and return.
/// 3 - Write either the full first buffer of data, or just enough bytes to get back down to having
@ -233,19 +233,19 @@ impl TakeAllLinesBuffer {
/// copied.
///
/// Algorithm for this function is as follows...
/// 1 - Chunks of the input file are read into a queue of TakeAllLinesBuffer instances.
/// 1 - Chunks of the input file are read into a queue of [`TakeAllLinesBuffer`] instances.
/// Chunks are read until at least we have enough lines that we can write out the entire
/// contents of the first TakeAllLinesBuffer in the queue whilst still retaining at least
/// contents of the first [`TakeAllLinesBuffer`] in the queue whilst still retaining at least
/// `n` lines in the queue.
/// If we hit EoF at any point, stop reading.
/// If we hit `EoF` at any point, stop reading.
/// 2 - Assess whether we managed to queue up greater-than `n` lines. If not, we must be done, in
/// which case break and return.
/// 3 - Write either the full first buffer of data, or just enough lines to get back down to
/// having the required `n` lines of data queued.
/// 4 - Go back to (1).
///
/// Note that lines will regularly straddle multiple TakeAllLinesBuffer instances. The partial_line
/// flag on TakeAllLinesBuffer tracks this, and we use that to ensure that we write out enough
/// Note that lines will regularly straddle multiple [`TakeAllLinesBuffer`] instances. The `partial_line`
/// flag on [`TakeAllLinesBuffer`] tracks this, and we use that to ensure that we write out enough
/// lines in the case that the input file doesn't end with a `separator` character.
pub fn copy_all_but_n_lines<R: Read, W: Write>(
mut reader: R,

View file

@ -655,8 +655,8 @@ fn standard(mut paths: Vec<String>, b: &Behavior) -> UResult<()> {
///
/// # Parameters
///
/// _files_ must all exist as non-directories.
/// _target_dir_ must be a directory.
/// `files` must all exist as non-directories.
/// `target_dir` must be a directory.
///
fn copy_files_into_dir(files: &[PathBuf], target_dir: &Path, b: &Behavior) -> UResult<()> {
if !target_dir.is_dir() {
@ -768,7 +768,7 @@ fn perform_backup(to: &Path, b: &Behavior) -> UResult<Option<PathBuf>> {
}
}
/// Copy a non-special file using std::fs::copy.
/// Copy a non-special file using [`fs::copy`].
///
/// # Parameters
/// * `from` - The source file path.

View file

@ -147,9 +147,8 @@ impl<'a> StyleManager<'a> {
}
/// Colors the provided name based on the style determined for the given path
/// This function is quite long because it tries to leverage DirEntry to avoid
/// unnecessary calls to stat()
/// and manages the symlink errors
/// This function is quite long because it tries to leverage [`DirEntry`] to avoid
/// unnecessary calls to stat and manages the symlink errors
pub(crate) fn color_name(
name: OsString,
path: &PathData,

View file

@ -77,6 +77,7 @@ use uucore::{parser::parse_glob, show, show_error, show_warning};
mod dired;
use dired::{DiredOutput, is_dired_arg_present};
mod colors;
use crate::options::QUOTING_STYLE;
use colors::{StyleManager, color_name};
pub mod options {
@ -583,12 +584,12 @@ fn extract_hyperlink(options: &clap::ArgMatches) -> bool {
}
}
/// Match the argument given to --quoting-style or the QUOTING_STYLE env variable.
/// Match the argument given to --quoting-style or the [`QUOTING_STYLE`] env variable.
///
/// # Arguments
///
/// * `style`: the actual argument string
/// * `show_control` - A boolean value representing whether or not to show control characters.
/// * `show_control` - A boolean value representing whether to show control characters.
///
/// # Returns
///
@ -609,18 +610,18 @@ fn match_quoting_style_name(style: &str, show_control: bool) -> Option<QuotingSt
/// Extracts the quoting style to use based on the options provided.
/// If no options are given, it looks if a default quoting style is provided
/// through the QUOTING_STYLE environment variable.
/// through the [`QUOTING_STYLE`] environment variable.
///
/// # Arguments
///
/// * `options` - A reference to a clap::ArgMatches object containing command line arguments.
/// * `options` - A reference to a [`clap::ArgMatches`] object containing command line arguments.
/// * `show_control` - A boolean value representing whether or not to show control characters.
///
/// # Returns
///
/// A QuotingStyle variant representing the quoting style to use.
/// A [`QuotingStyle`] variant representing the quoting style to use.
fn extract_quoting_style(options: &clap::ArgMatches, show_control: bool) -> QuotingStyle {
let opt_quoting_style = options.get_one::<String>(options::QUOTING_STYLE);
let opt_quoting_style = options.get_one::<String>(QUOTING_STYLE);
if let Some(style) = opt_quoting_style {
match match_quoting_style_name(style, show_control) {
@ -670,7 +671,7 @@ fn extract_quoting_style(options: &clap::ArgMatches, show_control: bool) -> Quot
///
/// # Returns
///
/// An IndicatorStyle variant representing the indicator style to use.
/// An [`IndicatorStyle`] variant representing the indicator style to use.
fn extract_indicator_style(options: &clap::ArgMatches) -> IndicatorStyle {
if let Some(field) = options.get_one::<String>(options::INDICATOR_STYLE) {
match field.as_str() {
@ -998,7 +999,7 @@ impl Config {
let zero_colors_opts = [options::COLOR];
let zero_show_control_opts = [options::HIDE_CONTROL_CHARS, options::SHOW_CONTROL_CHARS];
let zero_quoting_style_opts = [
options::QUOTING_STYLE,
QUOTING_STYLE,
options::quoting::C,
options::quoting::ESCAPE,
options::quoting::LITERAL,
@ -1330,8 +1331,8 @@ pub fn uu_app() -> Command {
)
// Quoting style
.arg(
Arg::new(options::QUOTING_STYLE)
.long(options::QUOTING_STYLE)
Arg::new(QUOTING_STYLE)
.long(QUOTING_STYLE)
.help(get_message("ls-help-set-quoting-style"))
.value_parser(ShortcutValueParser::new([
PossibleValue::new("literal"),
@ -1343,7 +1344,7 @@ pub fn uu_app() -> Command {
PossibleValue::new("escape"),
]))
.overrides_with_all([
options::QUOTING_STYLE,
QUOTING_STYLE,
options::quoting::LITERAL,
options::quoting::ESCAPE,
options::quoting::C,
@ -1356,7 +1357,7 @@ pub fn uu_app() -> Command {
.alias("l")
.help(get_message("ls-help-literal-quoting-style"))
.overrides_with_all([
options::QUOTING_STYLE,
QUOTING_STYLE,
options::quoting::LITERAL,
options::quoting::ESCAPE,
options::quoting::C,
@ -1369,7 +1370,7 @@ pub fn uu_app() -> Command {
.long(options::quoting::ESCAPE)
.help(get_message("ls-help-escape-quoting-style"))
.overrides_with_all([
options::QUOTING_STYLE,
QUOTING_STYLE,
options::quoting::LITERAL,
options::quoting::ESCAPE,
options::quoting::C,
@ -1382,7 +1383,7 @@ pub fn uu_app() -> Command {
.long(options::quoting::C)
.help(get_message("ls-help-c-quoting-style"))
.overrides_with_all([
options::QUOTING_STYLE,
QUOTING_STYLE,
options::quoting::LITERAL,
options::quoting::ESCAPE,
options::quoting::C,
@ -2703,7 +2704,7 @@ fn display_grid(
Ok(())
}
/// This writes to the BufWriter state.out a single string of the output of `ls -l`.
/// This writes to the [`BufWriter`] `state.out` a single string of the output of `ls -l`.
///
/// It writes the following keys, in order:
/// * `inode` ([`get_inode`], config-optional)
@ -2717,8 +2718,8 @@ fn display_grid(
/// * `item_name` ([`display_item_name`])
///
/// This function needs to display information in columns:
/// * permissions and system_time are already guaranteed to be pre-formatted in fixed length.
/// * item_name is the last column and is left-aligned.
/// * permissions and `system_time` are already guaranteed to be pre-formatted in fixed length.
/// * `item_name` is the last column and is left-aligned.
/// * Everything else needs to be padded using [`pad_left`].
///
/// That's why we have the parameters:

View file

@ -42,10 +42,10 @@ pub struct Config<'a> {
/// Print message for each created directory.
pub verbose: bool,
/// Set SELinux security context.
/// Set `SELinux` security context.
pub set_selinux_context: bool,
/// Specific SELinux context.
/// Specific `SELinux` context.
pub context: Option<&'a String>,
}

View file

@ -56,10 +56,10 @@ pub struct Config<'a> {
pub dev: dev_t,
/// Set SELinux security context.
/// Set `SELinux` security context.
pub set_selinux_context: bool,
/// Specific SELinux context.
/// Specific `SELinux` context.
pub context: Option<&'a String>,
}

View file

@ -12,7 +12,7 @@ pub enum Radix {
/// provides the byte offset printed at the left margin
pub struct InputOffset {
/// The radix to print the byte offset. NoPrefix will not print a byte offset.
/// The radix to print the byte offset. [`Radix::NoPrefix`] will not print a byte offset.
radix: Radix,
/// The current position. Initialize at `new`, increase using `increase_position`.
byte_pos: u64,

View file

@ -54,7 +54,7 @@ pub struct FailingMockStream {
}
impl FailingMockStream {
/// Creates a FailingMockStream
/// Creates a [`FailingMockStream`]
///
/// When `read` or `write` is called, it will return an error `repeat_count` times.
/// `kind` and `message` can be specified to define the exact error.

View file

@ -102,7 +102,7 @@ enum RemoveMethod {
WipeSync, // The same as 'Wipe' sync the file name changes
}
/// Iterates over all possible filenames of a certain length using NAME_CHARSET as an alphabet
/// Iterates over all possible filenames of a certain length using [`NAME_CHARSET`] as an alphabet
struct FilenameIter {
// Store the indices of the letters of our filename in NAME_CHARSET
name_charset_indices: Vec<usize>,
@ -156,7 +156,7 @@ enum RandomSource {
Read(File),
}
/// Used to generate blocks of bytes of size <= BLOCK_SIZE based on either a give pattern
/// Used to generate blocks of bytes of size <= [`BLOCK_SIZE`] based on either a given pattern
/// or randomness
// The lint warns about a large difference because StdRng is big, but the buffers are much
// larger anyway, so it's fine.
@ -170,7 +170,7 @@ enum BytesWriter<'a> {
rng_file: &'a File,
buffer: [u8; BLOCK_SIZE],
},
// To write patterns we only write to the buffer once. To be able to do
// To write patterns, we only write to the buffer once. To be able to do
// this, we need to extend the buffer with 2 bytes. We can then easily
// obtain a buffer starting with any character of the pattern that we
// want with an offset of either 0, 1 or 2.
@ -178,7 +178,7 @@ enum BytesWriter<'a> {
// For example, if we have the pattern ABC, but we want to write a block
// of BLOCK_SIZE starting with B, we just pick the slice [1..BLOCK_SIZE+1]
// This means that we only have to fill the buffer once and can just reuse
// it afterwards.
// it afterward.
Pattern {
offset: usize,
buffer: [u8; PATTERN_BUFFER_SIZE],

View file

@ -5,12 +5,12 @@
//! Fast comparison for strings representing a base 10 number without precision loss.
//!
//! To be able to short-circuit when comparing, [NumInfo] must be passed along with each number
//! to [numeric_str_cmp]. [NumInfo] is generally obtained by calling [NumInfo::parse] and should be cached.
//! It is allowed to arbitrarily modify the exponent afterwards, which is equivalent to shifting the decimal point.
//! To be able to short-circuit when comparing, [`NumInfo`] must be passed along with each number
//! to [`numeric_str_cmp`]. [`NumInfo`] is generally obtained by calling [`NumInfo::parse`] and should be cached.
//! It is allowed to arbitrarily modify the exponent afterward, which is equivalent to shifting the decimal point.
//!
//! More specifically, exponent can be understood so that the original number is in (1..10)*10^exponent.
//! From that follows the constraints of this algorithm: It is able to compare numbers in ±(1*10^[i64::MIN]..10*10^[i64::MAX]).
//! More specifically, exponent can be understood so that the original number is in `(1..10)*10^exponent`.
//! From that follows the constraints of this algorithm: It is able to compare numbers in ±(1*10^[`i64::MIN`]..10*10^[`i64::MAX`]).
use std::{cmp::Ordering, ops::Range};
@ -43,8 +43,8 @@ impl Default for NumInfoParseSettings {
}
impl NumInfo {
/// Parse NumInfo for this number.
/// Also returns the range of num that should be passed to numeric_str_cmp later.
/// Parse [`NumInfo`] for this number.
/// Also returns the range of num that should be passed to [`numeric_str_cmp`] later.
///
/// Leading zeros will be excluded from the returned range. If the number consists of only zeros,
/// an empty range (idx..idx) is returned so that idx is the char after the last zero.
@ -213,7 +213,7 @@ pub fn human_numeric_str_cmp(
}
/// Compare two numbers as strings without parsing them as a number first. This should be more performant and can handle numbers more precisely.
/// NumInfo is needed to provide a fast path for most numbers.
/// [`NumInfo`] is needed to provide a fast path for most numbers.
#[inline(always)]
pub fn numeric_str_cmp((a, a_info): (&str, &NumInfo), (b, b_info): (&str, &NumInfo)) -> Ordering {
// check for a difference in the sign

View file

@ -933,7 +933,7 @@ impl FieldSelector {
}
/// Get the selection that corresponds to this selector for the line.
/// If needs_fields returned false, tokens may be empty.
/// If `needs_fields` returned false, tokens may be empty.
fn get_selection<'a>(&self, line: &'a str, tokens: &[Field]) -> Selection<'a> {
// `get_range` expects `None` when we don't need tokens and would get confused by an empty vector.
let tokens = if self.needs_tokens {
@ -964,7 +964,7 @@ impl FieldSelector {
}
/// Look up the range in the line that corresponds to this selector.
/// If needs_fields returned false, tokens must be None.
/// If `needs_fields` returned false, tokens must be None.
fn get_range(&self, line: &str, tokens: Option<&[Field]>) -> Range<usize> {
enum Resolution {
// The start index of the resolved character, inclusive
@ -1878,8 +1878,8 @@ pub enum GeneralBigDecimalParseResult {
Infinity,
}
/// Parse the beginning string into a GeneralBigDecimalParseResult.
/// Using a GeneralBigDecimalParseResult instead of ExtendedBigDecimal is necessary to correctly order floats.
/// Parse the beginning string into a [`GeneralBigDecimalParseResult`].
/// Using a [`GeneralBigDecimalParseResult`] instead of [`ExtendedBigDecimal`] is necessary to correctly order floats.
#[inline(always)]
fn general_bd_parse(a: &str) -> GeneralBigDecimalParseResult {
// Parse digits, and fold in recoverable errors
@ -1946,7 +1946,7 @@ enum Month {
December,
}
/// Parse the beginning string into a Month, returning Month::Unknown on errors.
/// Parse the beginning string into a Month, returning [`Month::Unknown`] on errors.
fn month_parse(line: &str) -> Month {
let line = line.trim();

View file

@ -18,7 +18,7 @@ use uucore::{
use crate::SortError;
/// A wrapper around TempDir that may only exist once in a process.
/// A wrapper around [`TempDir`] that may only exist once in a process.
///
/// `TmpDirWrapper` handles the allocation of new temporary files in this temporary directory and
/// deleting the whole directory when `SIGINT` is received. Creating a second `TmpDirWrapper` will

View file

@ -14,7 +14,7 @@ use uucore::fs::FileInformation;
use uucore::locale::get_message_with_args;
use uucore::show;
/// A writer that writes to a shell_process' stdin
/// A writer that writes to a `shell_process`' stdin
///
/// We use a shell process (not directly calling a sub-process) so we can forward the name of the
/// corresponding output file (xaa, xab, xac… ). This is the way it was implemented in GNU split.

View file

@ -539,7 +539,7 @@ impl Settings {
}
/// When using `--filter` option, writing to child command process stdin
/// could fail with BrokenPipe error
/// could fail with [`ErrorKind::BrokenPipe`] error
/// It can be safely ignored
fn ignorable_io_error(error: &io::Error, settings: &Settings) -> bool {
error.kind() == ErrorKind::BrokenPipe && settings.filter.is_some()
@ -560,7 +560,7 @@ fn custom_write<T: Write>(bytes: &[u8], writer: &mut T, settings: &Settings) ->
/// Custom wrapper for `write_all()` method
/// Similar to [`custom_write`], but returns true or false
/// depending on if `--filter` stdin is still open (no BrokenPipe error)
/// depending on if `--filter` stdin is still open (no [`ErrorKind::BrokenPipe`] error)
/// Should not be used for Kth chunk number sub-strategies
/// as those do not work with `--filter` option
fn custom_write_all<T: Write>(
@ -923,7 +923,7 @@ trait ManageOutFiles {
settings: &Settings,
) -> UResult<&mut BufWriter<Box<dyn Write>>>;
/// Initialize a new set of output files
/// Each OutFile is generated with filename, while the writer for it could be
/// Each [`OutFile`] is generated with filename, while the writer for it could be
/// optional, to be instantiated later by the calling function as needed.
/// Optional writers could happen in the following situations:
/// * in [`n_chunks_by_line`] and [`n_chunks_by_line_round_robin`] if `elide_empty_files` parameter is set to `true`

View file

@ -268,7 +268,7 @@ struct Stater {
///
/// # Arguments
///
/// * `output` - A reference to the OutputType enum containing the value to be printed.
/// * `output` - A reference to the [`OutputType`] enum containing the value to be printed.
/// * `flags` - A Flags struct containing formatting flags.
/// * `width` - The width of the field for the printed output.
/// * `precision` - How many digits of precision, if any.

View file

@ -19,7 +19,7 @@ use uucore::error::UResult;
/// block read at a time.
pub const BLOCK_SIZE: u64 = 1 << 16;
/// The size of the backing buffer of a LinesChunk or BytesChunk in bytes. The value of BUFFER_SIZE
/// The size of the backing buffer of a [`LinesChunk`] or [`BytesChunk`] in bytes. The value of `BUFFER_SIZE`
/// originates from the BUFSIZ constant in stdio.h and the libc crate to make stream IO efficient.
/// In the latter the value is constantly set to 8192 on all platforms, where the value in stdio.h
/// is determined on each platform differently. Since libc chose 8192 as a reasonable default the
@ -115,8 +115,8 @@ pub struct BytesChunk {
/// [`BytesChunk::fill`]
buffer: ChunkBuffer,
/// Stores the number of bytes, this buffer holds. This is not equal to buffer.len(), since the
/// [`BytesChunk`] may store less bytes than the internal buffer can hold. In addition
/// Stores the number of bytes, this buffer holds. This is not equal to `buffer.len()`, since the
/// [`BytesChunk`] may store less bytes than the internal buffer can hold. In addition,
/// [`BytesChunk`] may be reused, what makes it necessary to track the number of stored bytes.
/// The choice of usize is sufficient here, since the number of bytes max value is
/// [`BUFFER_SIZE`], which is a usize.

View file

@ -18,9 +18,9 @@ use uucore::error::UResult;
/// Data structure to keep a handle on files to follow.
/// `last` always holds the path/key of the last file that was printed from.
/// The keys of the HashMap can point to an existing file path (normal case),
/// or stdin ("-"), or to a non existing path (--retry).
/// For existing files, all keys in the HashMap are absolute Paths.
/// The keys of the [`HashMap`] can point to an existing file path (normal case),
/// or stdin ("-"), or to a non-existing path (--retry).
/// For existing files, all keys in the [`HashMap`] are absolute Paths.
pub struct FileHandling {
map: HashMap<PathBuf, PathData>,
last: Option<PathBuf>,
@ -36,7 +36,7 @@ impl FileHandling {
}
}
/// Wrapper for HashMap::insert using Path::canonicalize
/// Wrapper for [`HashMap::insert`] using [`Path::canonicalize`]
pub fn insert(&mut self, k: &Path, v: PathData, update_last: bool) {
let k = Self::canonicalize_path(k);
if update_last {
@ -45,17 +45,17 @@ impl FileHandling {
let _ = self.map.insert(k, v);
}
/// Wrapper for HashMap::remove using Path::canonicalize
/// Wrapper for [`HashMap::remove`] using [`Path::canonicalize`]
pub fn remove(&mut self, k: &Path) -> PathData {
self.map.remove(&Self::canonicalize_path(k)).unwrap()
}
/// Wrapper for HashMap::get using Path::canonicalize
/// Wrapper for [`HashMap::get`] using [`Path::canonicalize`]
pub fn get(&self, k: &Path) -> &PathData {
self.map.get(&Self::canonicalize_path(k)).unwrap()
}
/// Wrapper for HashMap::get_mut using Path::canonicalize
/// Wrapper for [`HashMap::get_mut`] using [`Path::canonicalize`]
pub fn get_mut(&mut self, k: &Path) -> &mut PathData {
self.map.get_mut(&Self::canonicalize_path(k)).unwrap()
}
@ -115,8 +115,8 @@ impl FileHandling {
pub fn update_reader(&mut self, path: &Path) -> UResult<()> {
/*
BUG: If it's not necessary to reopen a file, GNU's tail calls seek to offset 0.
However we can't call seek here because `BufRead` does not implement `Seek`.
As a workaround we always reopen the file even though this might not always
However, we can't call seek here because `BufRead` does not implement `Seek`.
As a workaround, we always reopen the file even though this might not always
be necessary.
*/
self.get_mut(path)
@ -172,8 +172,8 @@ impl FileHandling {
}
}
/// Data structure to keep a handle on the BufReader, Metadata
/// and the display_name (header_name) of files that are being followed.
/// Data structure to keep a handle on the [`BufReader`], [`Metadata`]
/// and the `display_name` (`header_name`) of files that are being followed.
pub struct PathData {
pub reader: Option<Box<dyn BufRead>>,
pub metadata: Option<Metadata>,

View file

@ -29,7 +29,7 @@ pub enum ParseError {
/// A Result type for parsing test expressions
pub type ParseResult<T> = Result<T, ParseError>;
/// Implement UError trait for ParseError to make it easier to return useful error codes from main().
/// Implement `UError` trait for `ParseError` to make it easier to return useful error codes from `main()`.
impl uucore::error::UError for ParseError {
fn code(&self) -> i32 {
2

View file

@ -40,9 +40,9 @@ pub enum Symbol {
}
impl Symbol {
/// Create a new Symbol from an OsString.
/// Create a new Symbol from an [`OsString`].
///
/// Returns Symbol::None in place of None
/// Returns `Symbol::None` in place of None
fn new(token: Option<OsString>) -> Self {
match token {
Some(s) => match s.to_str() {
@ -66,13 +66,13 @@ impl Symbol {
}
}
/// Convert this Symbol into a Symbol::Literal, useful for cases where
/// Convert this Symbol into a [`Symbol::Literal`], useful for cases where
/// test treats an operator as a string operand (test has no reserved
/// words).
///
/// # Panics
///
/// Panics if `self` is Symbol::None
/// Panics if `self` is [`Symbol::None`]
fn into_literal(self) -> Self {
Self::Literal(match self {
Self::LParen => OsString::from("("),
@ -106,7 +106,7 @@ impl std::fmt::Display for Symbol {
}
}
/// Recursive descent parser for test, which converts a list of OsStrings
/// Recursive descent parser for test, which converts a list of [`OsString`]s
/// (typically command line arguments) into a stack of Symbols in postfix
/// order.
///

View file

@ -17,7 +17,7 @@ pub enum TouchError {
#[error("{}", get_message_with_args("touch-error-unable-to-parse-date", HashMap::from([("date".to_string(), .0.clone())])))]
InvalidDateFormat(String),
/// The source time couldn't be converted to a [chrono::DateTime]
/// The source time couldn't be converted to a [`chrono::DateTime`]
#[error("{}", get_message_with_args("touch-error-invalid-filetime", HashMap::from([("time".to_string(), .0.to_string())])))]
InvalidFiletime(FileTime),

View file

@ -123,9 +123,9 @@ mod format {
pub(crate) const YYYYMMDDHHMM_OFFSET: &str = "%Y-%m-%d %H:%M %z";
}
/// Convert a DateTime with a TZ offset into a FileTime
/// Convert a [`DateTime`] with a TZ offset into a [`FileTime`]
///
/// The DateTime is converted into a unix timestamp from which the FileTime is
/// The [`DateTime`] is converted into a unix timestamp from which the [`FileTime`] is
/// constructed.
fn datetime_to_filetime<T: TimeZone>(dt: &DateTime<T>) -> FileTime {
FileTime::from_unix_time(dt.timestamp(), dt.timestamp_subsec_nanos())
@ -693,9 +693,9 @@ fn prepend_century(s: &str) -> UResult<String> {
))
}
/// Parses a timestamp string into a FileTime.
/// Parses a timestamp string into a [`FileTime`].
///
/// This function attempts to parse a string into a FileTime
/// This function attempts to parse a string into a [`FileTime`]
/// As expected by gnu touch -t : `[[cc]yy]mmddhhmm[.ss]`
///
/// Note that If the year is specified with only two digits,
@ -772,9 +772,9 @@ fn parse_timestamp(s: &str) -> UResult<FileTime> {
}
// TODO: this may be a good candidate to put in fsext.rs
/// Returns a PathBuf to stdout.
/// Returns a [`PathBuf`] to stdout.
///
/// On Windows, uses GetFinalPathNameByHandleW to attempt to get the path
/// On Windows, uses `GetFinalPathNameByHandleW` to attempt to get the path
/// from the stdout handle.
fn pathbuf_from_stdout() -> Result<PathBuf, TouchError> {
#[cfg(all(unix, not(target_os = "android")))]

View file

@ -581,7 +581,7 @@ impl Sequence {
pub trait SymbolTranslator {
fn translate(&mut self, current: u8) -> Option<u8>;
/// Takes two SymbolTranslators and creates a new SymbolTranslator over both in sequence.
/// Takes two [`SymbolTranslator`]s and creates a new [`SymbolTranslator`] over both in sequence.
///
/// This behaves pretty much identical to [`Iterator::chain`].
fn chain<T>(self, other: T) -> ChainedSymbolTranslator<Self, T>

View file

@ -512,7 +512,7 @@ fn handle_extract_obs_skip_chars(
}
}
/// Maps Clap errors to USimpleError and overrides 3 specific ones
/// Maps Clap errors to [`USimpleError`] and overrides 3 specific ones
/// to meet requirements of GNU tests for `uniq`.
/// Unfortunately these overrides are necessary, since several GNU tests
/// for `uniq` hardcode and require the exact wording of the error message

View file

@ -198,7 +198,7 @@ pub(crate) fn count_bytes_fast<T: WordCountable>(handle: &mut T) -> (usize, Opti
}
}
/// A simple structure used to align a BUF_SIZE buffer to 32-byte boundary.
/// A simple structure used to align a [`BUF_SIZE`] buffer to 32-byte boundary.
///
/// This is useful as bytecount uses 256-bit wide vector operations that run much
/// faster on aligned data (at least on x86 with AVX2 support).
@ -215,7 +215,7 @@ impl Default for AlignedBuffer {
}
}
/// Returns a WordCount that counts the number of bytes, lines, and/or the number of Unicode characters encoded in UTF-8 read via a Reader.
/// Returns a [`WordCount`] that counts the number of bytes, lines, and/or the number of Unicode characters encoded in UTF-8 read via a Reader.
///
/// This corresponds to the `-c`, `-l` and `-m` command line flags to wc.
///

View file

@ -14,9 +14,9 @@ use std::str;
/// Incremental, zero-copy UTF-8 decoding with error handling
///
/// The original implementation was written by Simon Sapin in the utf-8 crate <https://crates.io/crates/utf-8>.
/// uu_wc used to depend on that crate.
/// `uu_wc` used to depend on that crate.
/// The author archived the repository <https://github.com/SimonSapin/rust-utf8>.
/// They suggested incorporating the source directly into uu_wc <https://github.com/uutils/coreutils/issues/4289>.
/// They suggested incorporating the source directly into `uu_wc` <https://github.com/uutils/coreutils/issues/4289>.
///
#[derive(Debug, Copy, Clone)]
@ -53,9 +53,9 @@ impl Incomplete {
&self.buffer[..len]
}
/// (consumed_from_input, None): not enough input
/// (consumed_from_input, Some(Err(()))): error bytes in buffer
/// (consumed_from_input, Some(Ok(()))): UTF-8 string in buffer
/// `(consumed_from_input, None)`: not enough input
/// `(consumed_from_input, Some(Err(())))`: error bytes in buffer
/// `(consumed_from_input, Some(Ok(())))`: UTF-8 string in buffer
fn try_complete_offsets(&mut self, input: &[u8]) -> (usize, Option<Result<(), ()>>) {
let initial_buffer_len = self.buffer_len as usize;
let copied_from_input;

View file

@ -198,7 +198,7 @@ impl<'a> Inputs<'a> {
#[derive(Clone, Copy, Debug)]
enum StdinKind {
/// Specified on command-line with "-" (STDIN_REPR)
/// Specified on command-line with "-" ([`STDIN_REPR`])
Explicit,
/// Implied by the lack of any arguments
Implicit,
@ -234,7 +234,7 @@ impl<'a, T: AsRef<Path> + ?Sized> From<&'a T> for Input<'a> {
}
impl<'a> Input<'a> {
/// Translates Path(Cow::Owned(_)) to Path(Cow::Borrowed(_)).
/// Translates `Path(Cow::Owned(_))` to `Path(Cow::Borrowed(_))`.
fn as_borrowed(&'a self) -> Self {
match self {
Self::Path(p) => Self::Path(Cow::Borrowed(p.borrow())),
@ -271,7 +271,7 @@ impl<'a> Input<'a> {
/// When given --files0-from, we may be given a path or stdin. Either may be a stream or
/// a regular file. If given a file less than 10 MiB, it will be consumed and turned into
/// a Vec of Input::Paths which can be scanned to determine the widths of the columns that
/// a Vec of [`Input::Path`] which can be scanned to determine the widths of the columns that
/// will ultimately be printed.
fn try_as_files0(&self) -> UResult<Option<Vec<Input<'static>>>> {
match self {
@ -657,11 +657,11 @@ enum CountResult {
Failure(io::Error),
}
/// If we fail opening a file, we only show the error. If we fail reading the
/// If we fail to open a file, we only show the error. If we fail reading the
/// file, we show a count for what we managed to read.
///
/// Therefore, the reading implementations always return a total and sometimes
/// return an error: (WordCount, Option<io::Error>).
/// return an error: ([`WordCount`], `Option<io::Error>`).
fn word_count_from_input(input: &Input<'_>, settings: &Settings) -> CountResult {
let (total, maybe_err) = match input {
Input::Stdin(_) => word_count_from_reader(io::stdin().lock(), settings),
@ -734,7 +734,7 @@ fn compute_number_width(inputs: &Inputs, settings: &Settings) -> usize {
type InputIterItem<'a> = Result<Input<'a>, Box<dyn UError>>;
/// To be used with `--files0-from=-`, this applies a filter on the results of files0_iter to
/// To be used with `--files0-from=-`, this applies a filter on the results of [`files0_iter`] to
/// translate '-' into the appropriate error.
fn files0_iter_stdin<'a>() -> impl Iterator<Item = InputIterItem<'a>> {
files0_iter(io::stdin().lock(), STDIN_REPR.into()).map(|i| match i {