diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index bb6d82ff13d..bf8ce6971b8 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1462,12 +1462,12 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_add_bytes`](Self::fetch_add_bytes) method instead.
+    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
     ///
-    /// `fetch_add` takes an [`Ordering`] argument which describes the memory
-    /// ordering of this operation. All ordering modes are possible. Note that
-    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
-    /// and using [`Release`] makes the load part [`Relaxed`].
+    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
     ///
     /// **Note**: This method is only available on platforms that support atomic
     /// operations on [`AtomicPtr`].
@@ -1481,15 +1481,15 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: units of `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_add_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by subtracting `val` (in units of `T`),
@@ -1502,9 +1502,9 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_sub_bytes`](Self::fetch_sub_bytes) method instead.
+    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
     ///
-    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory
+    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
     /// ordering of this operation. All ordering modes are possible. Note that
     /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
     /// and using [`Release`] makes the load part [`Relaxed`].
@@ -1524,7 +1524,7 @@ impl<T> AtomicPtr<T> {
     /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
     ///
     /// assert!(core::ptr::eq(
-    ///     atom.fetch_sub(1, Ordering::Relaxed),
+    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
     ///     &array[1],
     /// ));
     /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
@@ -1532,8 +1532,8 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_sub_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by adding `val` *bytes*, returning the
@@ -1542,7 +1542,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1560,14 +1560,14 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add_bytes(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: in units of bytes, not `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
@@ -1586,7 +1586,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1604,13 +1604,13 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
-    /// assert_eq!(atom.fetch_sub_bytes(1, Ordering::Relaxed).addr(), 1);
+    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 2c048435dde..13b12db209a 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -131,16 +131,16 @@ fn int_max() {
 #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
 fn ptr_add_null() {
     let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    assert_eq!(atom.fetch_add(1, SeqCst).addr(), 0);
+    assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
     assert_eq!(atom.load(SeqCst).addr(), 8);
 
-    assert_eq!(atom.fetch_add_bytes(1, SeqCst).addr(), 8);
+    assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
     assert_eq!(atom.load(SeqCst).addr(), 9);
 
-    assert_eq!(atom.fetch_sub(1, SeqCst).addr(), 9);
+    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
     assert_eq!(atom.load(SeqCst).addr(), 1);
 
-    assert_eq!(atom.fetch_sub_bytes(1, SeqCst).addr(), 1);
+    assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
     assert_eq!(atom.load(SeqCst).addr(), 0);
 }
 
@@ -150,23 +150,23 @@ fn ptr_add_data() {
     let num = 0i64;
     let n = &num as *const i64 as *mut _;
     let atom = AtomicPtr::<i64>::new(n);
-    assert_eq!(atom.fetch_add(1, SeqCst), n);
+    assert_eq!(atom.fetch_ptr_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), n.wrapping_add(1));
 
-    assert_eq!(atom.fetch_sub(1, SeqCst), n.wrapping_add(1));
+    assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
     assert_eq!(atom.load(SeqCst), n);
 
     let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
 
-    assert_eq!(atom.fetch_add_bytes(1, SeqCst), n);
+    assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), bytes_from_n(1));
 
-    assert_eq!(atom.fetch_add_bytes(5, SeqCst), bytes_from_n(1));
+    assert_eq!(atom.fetch_byte_add(5, SeqCst), bytes_from_n(1));
     assert_eq!(atom.load(SeqCst), bytes_from_n(6));
 
-    assert_eq!(atom.fetch_sub_bytes(1, SeqCst), bytes_from_n(6));
+    assert_eq!(atom.fetch_byte_sub(1, SeqCst), bytes_from_n(6));
     assert_eq!(atom.load(SeqCst), bytes_from_n(5));
 
-    assert_eq!(atom.fetch_sub_bytes(5, SeqCst), bytes_from_n(5));
+    assert_eq!(atom.fetch_byte_sub(5, SeqCst), bytes_from_n(5));
     assert_eq!(atom.load(SeqCst), n);
 }
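
Not part of the patch, but for reviewers who want to exercise the renamed API: a minimal standalone sketch, assuming a nightly toolchain where these methods and `pointer::addr` are still gated behind the `strict_provenance_atomic_ptr` and `strict_provenance` features.

    #![feature(strict_provenance_atomic_ptr, strict_provenance)]

    use core::sync::atomic::{AtomicPtr, Ordering};

    fn main() {
        let mut slots = [0u64; 4];
        let base = slots.as_mut_ptr();
        let atom = AtomicPtr::new(base);

        // `fetch_ptr_add` counts in units of `size_of::<u64>()` (8 bytes here)
        // and returns the previous pointer value.
        let prev = atom.fetch_ptr_add(2, Ordering::Relaxed);
        assert_eq!(prev.addr(), base.addr());
        assert_eq!(atom.load(Ordering::Relaxed).addr(), base.addr() + 16);

        // `fetch_byte_sub` counts raw bytes, so the stored pointer may end up
        // deliberately misaligned for `u64`.
        atom.fetch_byte_sub(1, Ordering::Relaxed);
        assert_eq!(atom.load(Ordering::Relaxed).addr(), base.addr() + 15);
    }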