Rollup merge of #89835 - jkugelman:must-use-expensive-computations, r=joshtriplett
Add #[must_use] to expensive computations. The unifying theme for this commit is weak, admittedly. I put together a list of "expensive" functions when I originally proposed this whole effort, but nobody's cared about that criterion. Still, it's a decent way to bite off a not-too-big chunk of work. Given the grab-bag nature of this commit, the messages I used vary quite a bit. I'm open to wording changes. For some reason clippy flagged four `BTreeSet` methods but didn't say boo about equivalent ones on `HashSet`. I stared at them for a while but I can't figure out the difference, so I added the `HashSet` ones in. ```rust // Flagged by clippy. alloc::collections::btree_set::BTreeSet<T> fn difference<'a>(&'a self, other: &'a BTreeSet<T>) -> Difference<'a, T>; alloc::collections::btree_set::BTreeSet<T> fn symmetric_difference<'a>(&'a self, other: &'a BTreeSet<T>) -> SymmetricDifference<'a, T>; alloc::collections::btree_set::BTreeSet<T> fn intersection<'a>(&'a self, other: &'a BTreeSet<T>) -> Intersection<'a, T>; alloc::collections::btree_set::BTreeSet<T> fn union<'a>(&'a self, other: &'a BTreeSet<T>) -> Union<'a, T>; // Ignored by clippy, but not by me. std::collections::HashSet<T, S> fn difference<'a>(&'a self, other: &'a HashSet<T, S>) -> Difference<'a, T, S>; std::collections::HashSet<T, S> fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, S>) -> SymmetricDifference<'a, T, S>; std::collections::HashSet<T, S> fn intersection<'a>(&'a self, other: &'a HashSet<T, S>) -> Intersection<'a, T, S>; std::collections::HashSet<T, S> fn union<'a>(&'a self, other: &'a HashSet<T, S>) -> Union<'a, T, S>; ``` Parent issue: #89692 r? ```@joshtriplett```
This commit is contained in:
commit
a26b1d2259
11 changed files with 32 additions and 1 deletions
|
@ -11,6 +11,7 @@ use crate::ops;
|
|||
impl [u8] {
|
||||
/// Checks if all bytes in this slice are within the ASCII range.
|
||||
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn is_ascii(&self) -> bool {
|
||||
is_ascii(self)
|
||||
|
@ -21,6 +22,7 @@ impl [u8] {
|
|||
/// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
|
||||
/// but without allocating and copying temporaries.
|
||||
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
|
||||
self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
|
||||
|
|
|
@ -37,6 +37,7 @@ fn repeat_byte(b: u8) -> usize {
|
|||
}
|
||||
|
||||
/// Returns the first index matching the byte `x` in `text`.
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
|
||||
// Fast path for small slices
|
||||
|
@ -91,6 +92,7 @@ fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
|
|||
}
|
||||
|
||||
/// Returns the last index matching the byte `x` in `text`.
|
||||
#[must_use]
|
||||
pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
|
||||
// Scan for a single byte value by reading two `usize` words at a time.
|
||||
//
|
||||
|
|
|
@ -27,6 +27,7 @@ use super::{IsAsciiWhitespace, IsNotEmpty, IsWhitespace};
|
|||
/// [`char`]: prim@char
|
||||
/// [`chars`]: str::chars
|
||||
#[derive(Clone)]
|
||||
#[must_use = "iterators are lazy and do nothing unless consumed"]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct Chars<'a> {
|
||||
pub(super) iter: slice::Iter<'a, u8>,
|
||||
|
@ -125,6 +126,7 @@ impl<'a> Chars<'a> {
|
|||
/// [`char`]: prim@char
|
||||
/// [`char_indices`]: str::char_indices
|
||||
#[derive(Clone, Debug)]
|
||||
#[must_use = "iterators are lazy and do nothing unless consumed"]
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
pub struct CharIndices<'a> {
|
||||
pub(super) front_offset: usize,
|
||||
|
@ -1089,6 +1091,7 @@ generate_pattern_iterators! {
|
|||
///
|
||||
/// [`lines`]: str::lines
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[must_use = "iterators are lazy and do nothing unless consumed"]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Lines<'a>(pub(super) Map<SplitTerminator<'a, char>, LinesAnyMap>);
|
||||
|
||||
|
@ -1128,6 +1131,7 @@ impl FusedIterator for Lines<'_> {}
|
|||
/// [`lines_any`]: str::lines_any
|
||||
#[stable(feature = "rust1", since = "1.0.0")]
|
||||
#[rustc_deprecated(since = "1.4.0", reason = "use lines()/Lines instead now")]
|
||||
#[must_use = "iterators are lazy and do nothing unless consumed"]
|
||||
#[derive(Clone, Debug)]
|
||||
#[allow(deprecated)]
|
||||
pub struct LinesAny<'a>(pub(super) Lines<'a>);
|
||||
|
|
|
@ -29,6 +29,7 @@ impl Utf8Lossy {
|
|||
}
|
||||
|
||||
/// Iterator over lossy UTF-8 string
|
||||
#[must_use = "iterators are lazy and do nothing unless consumed"]
|
||||
#[unstable(feature = "str_internals", issue = "none")]
|
||||
#[allow(missing_debug_implementations)]
|
||||
pub struct Utf8LossyChunksIter<'a> {
|
||||
|
|
|
@ -2255,6 +2255,7 @@ impl str {
|
|||
/// assert!(!non_ascii.is_ascii());
|
||||
/// ```
|
||||
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn is_ascii(&self) -> bool {
|
||||
// We can treat each byte as character here: all multibyte characters
|
||||
|
@ -2276,6 +2277,7 @@ impl str {
|
|||
/// assert!(!"Ferrös".eq_ignore_ascii_case("FERRÖS"));
|
||||
/// ```
|
||||
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn eq_ignore_ascii_case(&self, other: &str) -> bool {
|
||||
self.as_bytes().eq_ignore_ascii_case(other.as_bytes())
|
||||
|
|
|
@ -115,7 +115,7 @@ fn test_eq_ignore_ascii_case() {
|
|||
#[test]
|
||||
fn inference_works() {
|
||||
let x = "a".to_string();
|
||||
x.eq_ignore_ascii_case("A");
|
||||
let _ = x.eq_ignore_ascii_case("A");
|
||||
}
|
||||
|
||||
// Shorthands used by the is_ascii_* tests.
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue