
Auto merge of #95241 - Gankra:cleaned-provenance, r=workingjubilee

Strict Provenance MVP

This patch series examines the question: how bad would it be if we adopted
an extremely strict pointer provenance model that completely banished all
int<->ptr casts?

The key insight to making this approach even *vaguely* palatable is the

ptr.with_addr(addr) -> ptr

function, which takes a pointer and an address and creates a new pointer
with that address and the provenance of the input pointer. In this way
the "chain of custody" is completely and dynamically restored, making the
model suitable even for dynamic checkers like CHERI and Miri.
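
For illustration, a minimal sketch of that round trip (not part of the patch,
and assuming a nightly toolchain with `#![feature(strict_provenance)]`):

```rust
#![feature(strict_provenance)]

fn main() {
    let x = 42u32;
    let ptr = &x as *const u32;

    // Strip the pointer down to its address, inspect or tweak it as a plain
    // integer, then reattach the original provenance with `with_addr`.
    let addr = ptr.addr();
    let same_ptr = ptr.with_addr(addr);

    // The "chain of custody" is intact, so reading through it is fine.
    assert_eq!(unsafe { *same_ptr }, 42);
}
```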

This is not a formal model, but lots of the docs discussing the model
have been updated to try to convey the *concept* of this design in the hopes
that it can be iterated on.

See #95228
bors 2022-03-30 10:09:10 +00:00
commit e50ff9b452
39 changed files with 621 additions and 126 deletions


@ -18,6 +18,7 @@
#![feature(decl_macro)] #![feature(decl_macro)]
#![feature(rustc_attrs)] #![feature(rustc_attrs)]
#![cfg_attr(test, feature(test))] #![cfg_attr(test, feature(test))]
#![feature(strict_provenance)]
use smallvec::SmallVec; use smallvec::SmallVec;
@ -87,7 +88,7 @@ impl<T> ArenaChunk<T> {
unsafe { unsafe {
if mem::size_of::<T>() == 0 { if mem::size_of::<T>() == 0 {
// A pointer as large as possible for zero-sized elements. // A pointer as large as possible for zero-sized elements.
!0 as *mut T ptr::invalid_mut(!0)
} else { } else {
self.start().add(self.storage.len()) self.start().add(self.storage.len())
} }
@ -199,7 +200,7 @@ impl<T> TypedArena<T> {
unsafe { unsafe {
if mem::size_of::<T>() == 0 { if mem::size_of::<T>() == 0 {
self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T); self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
let ptr = mem::align_of::<T>() as *mut T; let ptr = ptr::NonNull::<T>::dangling().as_ptr();
// Don't drop the object. This `write` is equivalent to `forget`. // Don't drop the object. This `write` is equivalent to `forget`.
ptr::write(ptr, object); ptr::write(ptr, object);
&mut *ptr &mut *ptr
@ -216,7 +217,9 @@ impl<T> TypedArena<T> {
#[inline] #[inline]
fn can_allocate(&self, additional: usize) -> bool { fn can_allocate(&self, additional: usize) -> bool {
let available_bytes = self.end.get() as usize - self.ptr.get() as usize; // FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
let available_bytes = self.end.get().addr() - self.ptr.get().addr();
let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap(); let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
available_bytes >= additional_bytes available_bytes >= additional_bytes
} }
@ -262,7 +265,9 @@ impl<T> TypedArena<T> {
// If a type is `!needs_drop`, we don't need to keep track of how many elements // If a type is `!needs_drop`, we don't need to keep track of how many elements
// the chunk stores - the field will be ignored anyway. // the chunk stores - the field will be ignored anyway.
if mem::needs_drop::<T>() { if mem::needs_drop::<T>() {
let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize; // FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
last_chunk.entries = used_bytes / mem::size_of::<T>(); last_chunk.entries = used_bytes / mem::size_of::<T>();
} }
@ -288,9 +293,9 @@ impl<T> TypedArena<T> {
// chunks. // chunks.
fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) { fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
// Determine how much was filled. // Determine how much was filled.
let start = last_chunk.start() as usize; let start = last_chunk.start().addr();
// We obtain the value of the pointer to the first uninitialized element. // We obtain the value of the pointer to the first uninitialized element.
let end = self.ptr.get() as usize; let end = self.ptr.get().addr();
// We then calculate the number of elements to be dropped in the last chunk, // We then calculate the number of elements to be dropped in the last chunk,
// which is the filled area's length. // which is the filled area's length.
let diff = if mem::size_of::<T>() == 0 { let diff = if mem::size_of::<T>() == 0 {
@ -299,6 +304,8 @@ impl<T> TypedArena<T> {
// Recall that `end` was incremented for each allocated value. // Recall that `end` was incremented for each allocated value.
end - start end - start
} else { } else {
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
(end - start) / mem::size_of::<T>() (end - start) / mem::size_of::<T>()
}; };
// Pass that to the `destroy` method. // Pass that to the `destroy` method.
@ -395,15 +402,16 @@ impl DroplessArena {
/// request. /// request.
#[inline] #[inline]
fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> { fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> {
let start = self.start.get() as usize; let start = self.start.get().addr();
let end = self.end.get() as usize; let old_end = self.end.get();
let end = old_end.addr();
let align = layout.align(); let align = layout.align();
let bytes = layout.size(); let bytes = layout.size();
let new_end = end.checked_sub(bytes)? & !(align - 1); let new_end = end.checked_sub(bytes)? & !(align - 1);
if start <= new_end { if start <= new_end {
let new_end = new_end as *mut u8; let new_end = old_end.with_addr(new_end);
self.end.set(new_end); self.end.set(new_end);
Some(new_end) Some(new_end)
} else { } else {

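As a rough sketch of what the `alloc_raw_without_grow` change above amounts to
(the function and its parameters below are illustrative, not the arena's real
fields), the address arithmetic stays in `usize` and the final pointer is
rebuilt with `with_addr` so it keeps the provenance of the chunk's `end` pointer:

```rust
#![feature(strict_provenance)]
// Illustrative bump-down allocation: compute the new end address as an
// integer, then restore provenance from the existing `end` pointer.
fn bump_down(start: *mut u8, end: *mut u8, bytes: usize, align: usize) -> Option<*mut u8> {
    let new_end = end.addr().checked_sub(bytes)? & !(align - 1);
    if start.addr() <= new_end { Some(end.with_addr(new_end)) } else { None }
}

fn main() {
    let mut buf = [0u8; 64];
    let start = buf.as_mut_ptr();
    // One-past-the-end pointer of the buffer, still carrying its provenance.
    let end = start.wrapping_add(buf.len());
    let p = bump_down(start, end, 16, 8).unwrap();
    assert!(p.addr() % 8 == 0 && p.addr() >= start.addr());
}
```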

@ -6,6 +6,7 @@
#![feature(once_cell)] #![feature(once_cell)]
#![feature(nll)] #![feature(nll)]
#![feature(associated_type_bounds)] #![feature(associated_type_bounds)]
#![feature(strict_provenance)]
#![recursion_limit = "256"] #![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)] #![allow(rustc::potential_query_instability)]


@ -116,7 +116,7 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
fn to_raw_string(&self) -> String { fn to_raw_string(&self) -> String {
match *self { match *self {
MonoItem::Fn(instance) => { MonoItem::Fn(instance) => {
format!("Fn({:?}, {})", instance.def, instance.substs.as_ptr() as usize) format!("Fn({:?}, {})", instance.def, instance.substs.as_ptr().addr())
} }
MonoItem::Static(id) => format!("Static({:?})", id), MonoItem::Static(id) => format!("Static({:?})", id),
MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id), MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id),


@ -127,6 +127,7 @@
#![feature(slice_ptr_len)] #![feature(slice_ptr_len)]
#![feature(slice_range)] #![feature(slice_range)]
#![feature(str_internals)] #![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_len)] #![feature(trusted_len)]
#![feature(trusted_random_access)] #![feature(trusted_random_access)]
#![feature(try_trait_v2)] #![feature(try_trait_v2)]


@ -2115,13 +2115,12 @@ impl<T> Weak<T> {
#[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")] #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
#[must_use] #[must_use]
pub const fn new() -> Weak<T> { pub const fn new() -> Weak<T> {
Weak { ptr: unsafe { NonNull::new_unchecked(usize::MAX as *mut RcBox<T>) } } Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<RcBox<T>>(usize::MAX)) } }
} }
} }
pub(crate) fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool { pub(crate) fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool {
let address = ptr as *mut () as usize; (ptr as *mut ()).addr() == usize::MAX
address == usize::MAX
} }
/// Helper type to allow accessing the reference counts without /// Helper type to allow accessing the reference counts without

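A small sketch of the sentinel pattern shown above (assuming
`#![feature(strict_provenance)]`): the dangling `Weak` is just an invalid
pointer at `usize::MAX`, and `is_dangling` only ever looks at the address:

```rust
#![feature(strict_provenance)]
use core::ptr;

// Same shape as the standard library helper: only the address is inspected.
fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool {
    (ptr as *mut ()).addr() == usize::MAX
}

fn main() {
    // The sentinel never touches a real allocation, so `invalid_mut` fits.
    let sentinel: *mut u32 = ptr::invalid_mut(usize::MAX);
    assert!(is_dangling(sentinel));

    let mut x = 0u32;
    assert!(!is_dangling(&mut x as *mut u32));
}
```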

@ -1043,9 +1043,9 @@ where
impl<T> Drop for MergeHole<T> { impl<T> Drop for MergeHole<T> {
fn drop(&mut self) { fn drop(&mut self) {
// `T` is not a zero-sized type, so it's okay to divide by its size. // `T` is not a zero-sized type, and these are pointers into a slice's elements.
let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
unsafe { unsafe {
let len = self.end.offset_from(self.start) as usize;
ptr::copy_nonoverlapping(self.start, self.dest, len); ptr::copy_nonoverlapping(self.start, self.dest, len);
} }
} }

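For comparison, a minimal sketch of the `offset_from` pattern that replaces
the manual address subtraction (standalone, not the sort's actual code):

```rust
// Both pointers must point into the same allocated object, with `start <= end`,
// which is exactly what `MergeHole` guarantees for its range.
fn elems_between<T>(start: *const T, end: *const T) -> usize {
    unsafe { end.offset_from(start) as usize }
}

fn main() {
    let xs = [1u32, 2, 3, 4, 5];
    let start = xs.as_ptr();
    let end = unsafe { start.add(xs.len()) };
    assert_eq!(elems_between(start, end), xs.len());
}
```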

@ -1746,7 +1746,7 @@ impl<T> Weak<T> {
#[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")] #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
#[must_use] #[must_use]
pub const fn new() -> Weak<T> { pub const fn new() -> Weak<T> {
Weak { ptr: unsafe { NonNull::new_unchecked(usize::MAX as *mut ArcInner<T>) } } Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) } }
} }
} }


@ -159,7 +159,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline] #[inline]
fn size_hint(&self) -> (usize, Option<usize>) { fn size_hint(&self) -> (usize, Option<usize>) {
let exact = if mem::size_of::<T>() == 0 { let exact = if mem::size_of::<T>() == 0 {
(self.end as usize).wrapping_sub(self.ptr as usize) self.end.addr().wrapping_sub(self.ptr.addr())
} else { } else {
unsafe { self.end.offset_from(self.ptr) as usize } unsafe { self.end.offset_from(self.ptr) as usize }
}; };


@ -194,7 +194,7 @@ impl Layout {
#[inline] #[inline]
pub const fn dangling(&self) -> NonNull<u8> { pub const fn dangling(&self) -> NonNull<u8> {
// SAFETY: align is guaranteed to be non-zero // SAFETY: align is guaranteed to be non-zero
unsafe { NonNull::new_unchecked(self.align() as *mut u8) } unsafe { NonNull::new_unchecked(crate::ptr::invalid_mut::<u8>(self.align())) }
} }
/// Creates a layout describing the record that can hold a value /// Creates a layout describing the record that can hold a value


@ -352,6 +352,10 @@ impl<'a> ArgumentV1<'a> {
} }
fn as_usize(&self) -> Option<usize> { fn as_usize(&self) -> Option<usize> {
// We are type punning a bit here: USIZE_MARKER only takes an &usize but
// formatter takes an &Opaque. Rust understandably doesn't think we should compare
// the function pointers if they don't have the same signature, so we cast to
// usizes to tell it that we just want to compare addresses.
if self.formatter as usize == USIZE_MARKER as usize { if self.formatter as usize == USIZE_MARKER as usize {
// SAFETY: The `formatter` field is only set to USIZE_MARKER if // SAFETY: The `formatter` field is only set to USIZE_MARKER if
// the value is a usize, so this is safe // the value is a usize, so this is safe
@ -2246,7 +2250,7 @@ impl<T: ?Sized> Pointer for *const T {
} }
f.flags |= 1 << (FlagV1::Alternate as u32); f.flags |= 1 << (FlagV1::Alternate as u32);
let ret = LowerHex::fmt(&(ptr as usize), f); let ret = LowerHex::fmt(&(ptr.addr()), f);
f.width = old_width; f.width = old_width;
f.flags = old_flags; f.flags = old_flags;


@ -793,7 +793,7 @@ mod impls {
#[inline] #[inline]
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
let (address, metadata) = self.to_raw_parts(); let (address, metadata) = self.to_raw_parts();
state.write_usize(address as usize); state.write_usize(address.addr());
metadata.hash(state); metadata.hash(state);
} }
} }
@ -803,7 +803,7 @@ mod impls {
#[inline] #[inline]
fn hash<H: Hasher>(&self, state: &mut H) { fn hash<H: Hasher>(&self, state: &mut H) {
let (address, metadata) = self.to_raw_parts(); let (address, metadata) = self.to_raw_parts();
state.write_usize(address as usize); state.write_usize(address.addr());
metadata.hash(state); metadata.hash(state);
} }
} }


@ -1972,15 +1972,15 @@ extern "rust-intrinsic" {
/// Checks whether `ptr` is properly aligned with respect to /// Checks whether `ptr` is properly aligned with respect to
/// `align_of::<T>()`. /// `align_of::<T>()`.
pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool { pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
!ptr.is_null() && ptr as usize % mem::align_of::<T>() == 0 !ptr.is_null() && ptr.addr() % mem::align_of::<T>() == 0
} }
/// Checks whether the regions of memory starting at `src` and `dst` of size /// Checks whether the regions of memory starting at `src` and `dst` of size
/// `count * size_of::<T>()` do *not* overlap. /// `count * size_of::<T>()` do *not* overlap.
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool { pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
let src_usize = src as usize; let src_usize = src.addr();
let dst_usize = dst as usize; let dst_usize = dst.addr();
let size = mem::size_of::<T>().checked_mul(count).unwrap(); let size = mem::size_of::<T>().checked_mul(count).unwrap();
let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize }; let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize };
// If the absolute distance between the ptrs is at least as big as the size of the buffer, // If the absolute distance between the ptrs is at least as big as the size of the buffer,

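A standalone sketch of the reworked check (simplified from the code above;
`addr()` assumes `#![feature(strict_provenance)]`):

```rust
#![feature(strict_provenance)]
use core::mem;

// The regions cannot overlap if the distance between their starting
// addresses is at least the size of the copied region.
fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
    let (src, dst) = (src.addr(), dst.addr());
    let size = mem::size_of::<T>().checked_mul(count).unwrap();
    let diff = if src > dst { src - dst } else { dst - src };
    diff >= size
}

fn main() {
    let buf = [0u64; 8];
    assert!(is_nonoverlapping(buf.as_ptr(), buf[4..].as_ptr(), 4));
    assert!(!is_nonoverlapping(buf.as_ptr(), buf[2..].as_ptr(), 4));
}
```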

@ -150,6 +150,79 @@ impl<T: ?Sized> *const T {
bits as Self bits as Self
} }
/// Gets the "address" portion of the pointer.
///
/// This is equivalent to `self as usize`, which semantically discards
/// *provenance* and *address-space* information. To properly restore that information,
/// use [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
/// Platforms which need to store additional information in the pointer may
/// perform a change of representation to produce a value containing only the address
/// portion of the pointer. What that means is up to the platform to define.
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
T: Sized,
{
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self as usize
}
/// Creates a new pointer with the given address.
///
/// This performs the same operation as an `addr as ptr` cast, but copies
/// the *address-space* and *provenance* of `self` to the new pointer.
/// This allows us to dynamically preserve and propagate this important
/// information in a way that is otherwise impossible with a unary cast.
///
/// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
/// `self` to the given address, and therefore has all the same capabilities and restrictions.
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn with_addr(self, addr: usize) -> Self
where
T: Sized,
{
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the meantime, this operation is defined to be "as if" it were
// a wrapping_offset, so we can emulate it as such. This should properly
// restore pointer provenance even under today's compiler.
let self_addr = self.addr() as isize;
let dest_addr = addr as isize;
let offset = dest_addr.wrapping_sub(self_addr);
// This is the canonical desugaring of this operation
self.cast::<u8>().wrapping_offset(offset).cast::<T>()
}
/// Creates a new pointer by mapping `self`'s address to a new one.
///
/// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
where
T: Sized,
{
self.with_addr(f(self.addr()))
}
/// Decompose a (possibly wide) pointer into its address and metadata components. /// Decompose a (possibly wide) pointer into its address and metadata components.
/// ///
/// The pointer can be later reconstructed with [`from_raw_parts`]. /// The pointer can be later reconstructed with [`from_raw_parts`].
@ -1006,7 +1079,7 @@ impl<T> *const [T] {
/// use std::ptr; /// use std::ptr;
/// ///
/// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3); /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
/// assert_eq!(slice.as_ptr(), 0 as *const i8); /// assert_eq!(slice.as_ptr(), ptr::null());
/// ``` /// ```
#[inline] #[inline]
#[unstable(feature = "slice_ptr_get", issue = "74265")] #[unstable(feature = "slice_ptr_get", issue = "74265")]

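As a hedged illustration of what the new `addr`/`map_addr` methods enable
(this helper is hypothetical, not part of the patch): rounding a pointer's
address up to an alignment while keeping its provenance:

```rust
#![feature(strict_provenance)]

// Round `ptr` up to `align` (a power of two). Assumes the addition does not
// overflow the address space, which holds for real in-bounds pointers.
fn align_up<T>(ptr: *const T, align: usize) -> *const T {
    assert!(align.is_power_of_two());
    ptr.map_addr(|addr| (addr + align - 1) & !(align - 1))
}

fn main() {
    let buf = [0u8; 32];
    let aligned = align_up(buf.as_ptr(), 8);
    assert_eq!(aligned.addr() % 8, 0);
    assert!(aligned.addr() - buf.as_ptr().addr() < 8);
}
```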

@ -63,12 +63,269 @@
//! separate allocated object), heap allocations (each allocation created by the global allocator is //! separate allocated object), heap allocations (each allocation created by the global allocator is
//! a separate allocated object), and `static` variables. //! a separate allocated object), and `static` variables.
//! //!
//!
//! # Strict Provenance
//!
//! **The following text is non-normative, insufficiently formal, and is an extremely strict
//! interpretation of provenance. It's ok if your code doesn't strictly conform to it.**
//!
//! [Strict Provenance][] is an experimental set of APIs that help tools that try
//! to validate the memory-safety of your program's execution. Notably this includes [miri][]
//! and [CHERI][], which can detect when you access out of bounds memory or otherwise violate
//! Rust's memory model.
//!
//! Provenance must exist in some form for any programming
//! language compiled for modern computer architectures, but specifying a model for provenance
//! in a way that is useful to both compilers and programmers is an ongoing challenge.
//! The [Strict Provenance][] experiment seeks to explore the question: *what if we just said you
//! couldn't do all the nasty operations that make provenance so messy?*
//!
//! What APIs would have to be removed? What APIs would have to be added? How much would code
//! have to change, and is it worse or better now? Would any patterns become truly inexpressible?
//! Could we carve out special exceptions for those patterns? Should we?
//!
//! A secondary goal of this project is to see if we can disambiguate the many functions of
//! pointer<->integer casts enough for the definition of `usize` to be loosened so that it
//! isn't *pointer*-sized but address-space/offset/allocation-sized (we'll probably continue
//! to conflate these notions). This would potentially make it possible to more efficiently
//! target platforms where pointers are larger than offsets, such as CHERI and maybe some
//! segmented architectures.
//!
//! ## Provenance
//!
//! **This section is *non-normative* and is part of the [Strict Provenance][] experiment.**
//!
//! Pointers are not *simply* an "integer" or "address". For instance, it's uncontroversial
//! to say that a Use After Free is clearly Undefined Behaviour, even if you "get lucky"
//! and the freed memory gets reallocated before your read/write (in fact this is the
//! worst-case scenario, UAFs would be much less concerning if this didn't happen!).
//! To rationalize this claim, pointers need to somehow be *more* than just their addresses:
//! they must have provenance.
//!
//! When an allocation is created, that allocation has a unique Original Pointer. For alloc
//! APIs this is literally the pointer the call returns, and for local variables and statics,
//! this is the name of the variable/static. This is mildly overloading the term "pointer"
//! for the sake of brevity/exposition.
//!
//! The Original Pointer for an allocation is guaranteed to have unique access to the entire
//! allocation and *only* that allocation. In this sense, an allocation can be thought of
//! as a "sandbox" that cannot be broken into or out of. *Provenance* is the permission
//! to access an allocation's sandbox and has both a *spatial* and *temporal* component:
//!
//! * Spatial: A range of bytes that the pointer is allowed to access.
//! * Temporal: The lifetime (of the allocation) that access to these bytes is tied to.
//!
//! Spatial provenance makes sure you don't go beyond your sandbox, while temporal provenance
//! makes sure that you can't "get lucky" after your permission to access some memory
//! has been revoked (either through deallocations or borrows expiring).
//!
//! Provenance is implicitly shared with all pointers transitively derived from
//! The Original Pointer through operations like [`offset`], borrowing, and pointer casts.
//! Some operations may *shrink* the derived provenance, limiting how much memory it can
//! access or how long it's valid for (i.e. borrowing a subfield and subslicing).
//!
//! Shrinking provenance cannot be undone: even if you "know" there is a larger allocation, you
//! can't derive a pointer with a larger provenance. Similarly, you cannot "recombine"
//! two contiguous provenances back into one (i.e. with a `fn merge(&[T], &[T]) -> &[T]`).
//!
//! A reference to a value always has provenance over exactly the memory that field occupies.
//! A reference to a slice always has provenance over exactly the range that slice describes.
//!
//! If an allocation is deallocated, all pointers with provenance to that allocation become
//! invalidated, and effectively lose their provenance.
//!
//! The strict provenance experiment is mostly only interested in exploring stricter *spatial*
//! provenance. In this sense it can be thought of as a subset of the more ambitious and
//! formal [Stacked Borrows][] research project, which is what tools like [miri][] are based on.
//! In particular, Stacked Borrows is necessary to properly describe what borrows are allowed
//! to do and when they become invalidated. This necessarily involves much more complex
//! *temporal* reasoning than simply identifying allocations. Adjusting APIs and code
//! for the strict provenance experiment will also greatly help Stacked Borrows.
//!
//!
//! ## Pointer Vs Addresses
//!
//! **This section is *non-normative* and is part of the [Strict Provenance][] experiment.**
//!
//! One of the largest historical issues with trying to define provenance is that programmers
//! freely convert between pointers and integers. Once you allow for this, it generally becomes
//! impossible to accurately track and preserve provenance information, and you need to appeal
//! to very complex and unreliable heuristics. But of course, converting between pointers and
//! integers is very useful, so what can we do?
//!
//! Also did you know WASM is actually a "Harvard Architecture"? As in function pointers are
//! handled completely differently from data pointers? And we kind of just shipped Rust on WASM
//! without really addressing the fact that we let you freely convert between function pointers
//! and data pointers, because it mostly Just Works? Let's just put that on the "pointer casts
//! are dubious" pile.
//!
//! Strict Provenance attempts to square these circles by decoupling Rust's traditional conflation
//! of pointers and `usize` (and `isize`), and defining a pointer to semantically contain the
//! following information:
//!
//! * The **address-space** it is part of (i.e. "data" vs "code" in WASM).
//! * The **address** it points to, which can be represented by a `usize`.
//! * The **provenance** it has, defining the memory it has permission to access.
//!
//! Under Strict Provenance, a usize *cannot* accurately represent a pointer, and converting from
//! a pointer to a usize is generally an operation which *only* extracts the address. It is
//! therefore *impossible* to construct a valid pointer from a usize because there is no way
//! to restore the address-space and provenance.
//!
//! The key insight to making this model *at all* viable is the [`with_addr`][] method:
//!
//! ```text
//! /// Creates a new pointer with the given address.
//! ///
//! /// This performs the same operation as an `addr as ptr` cast, but copies
//! /// the *address-space* and *provenance* of `self` to the new pointer.
//! /// This allows us to dynamically preserve and propagate this important
//! /// information in a way that is otherwise impossible with a unary cast.
//! ///
//! /// This is equivalent to using `wrapping_offset` to offset `self` to the
//! /// given address, and therefore has all the same capabilities and restrictions.
//! pub fn with_addr(self, addr: usize) -> Self;
//! ```
//!
//! So you're still able to drop down to the address representation and do whatever
//! clever bit tricks you want *as long as* you're able to keep around a pointer
//! into the allocation you care about that can "reconstitute" the other parts of the pointer.
//! Usually this is very easy, because you are only taking a pointer, messing with the address,
//! and then immediately converting back to a pointer. To make this use case more ergonomic,
//! we provide the [`map_addr`][] method.
//!
//! To help make it clear that code is "following" Strict Provenance semantics, we also
//! provide an [`addr`][] method which is currently equivalent to `ptr as usize`. In the
//! future we may provide a lint for pointer<->integer casts to help you audit if your
//! code conforms to strict provenance.
//!
//!
//! ## Using Strict Provenance
//!
//! Most code needs no changes to conform to strict provenance, as the only really concerning
//! operation that *wasn't* obviously already Undefined Behaviour is casts from usize to a
//! pointer. For code which *does* cast a usize to a pointer, the scope of the change depends
//! on exactly what you're doing.
//!
//! In general you just need to make sure that if you want to convert a usize address to a
//! pointer and then use that pointer to read/write memory, you need to keep around a pointer
//! that has sufficient provenance to perform that read/write itself. In this way all of your
//! casts from an address to a pointer are essentially just applying offsets/indexing.
//!
//! This is generally trivial to do for simple cases like tagged pointers *as long as you
//! represent the tagged pointer as an actual pointer and not a usize*. For instance:
//!
//! ```
//! #![feature(strict_provenance)]
//!
//! unsafe {
//! // A flag we want to pack into our pointer
//! static HAS_DATA: usize = 0x1;
//! static FLAG_MASK: usize = !HAS_DATA;
//!
//! // Our value, which must have enough alignment to have spare least-significant-bits.
//! let my_precious_data: u32 = 17;
//! assert!(core::mem::align_of::<u32>() > 1);
//!
//! // Create a tagged pointer
//! let ptr = &my_precious_data as *const u32;
//! let tagged = ptr.map_addr(|addr| addr | HAS_DATA);
//!
//! // Check the flag:
//! if tagged.addr() & HAS_DATA != 0 {
//! // Untag and read the pointer
//! let data = *tagged.map_addr(|addr| addr & FLAG_MASK);
//! assert_eq!(data, 17);
//! } else {
//! unreachable!()
//! }
//! }
//! ```
//!
//! (Yes, if you've been using AtomicUsize for pointers in concurrent datastructures, you should
//! be using AtomicPtr instead. If that messes up the way you atomically manipulate pointers,
//! we would like to know why, and what needs to be done to fix it.)
//!
//! Something more complicated and just generally *evil* like a XOR-List requires more significant
//! changes like allocating all nodes in a pre-allocated Vec or Arena and using a pointer
//! to the whole allocation to reconstitute the XORed addresses.
//!
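//! A hedged sketch of that idea (the code below is illustrative, not a real
//! XOR-list implementation): keep one pointer with provenance over the whole
//! buffer, XOR only *addresses*, and reattach provenance with
//! [`with_addr`][] whenever a usable pointer is needed:
//!
//! ```
//! #![feature(strict_provenance)]
//! // All nodes live in one allocation; `base` has provenance over all of them.
//! let nodes: Vec<u32> = vec![10, 20, 30];
//! let base = nodes.as_ptr();
//!
//! // Store only XORed addresses (a toy stand-in for a XOR list's links).
//! let a = base.addr();
//! let b = base.wrapping_add(2).addr();
//! let link = a ^ b;
//!
//! // Knowing one address, recover the other and rebuild a real pointer.
//! let recovered = base.with_addr(link ^ a);
//! assert_eq!(unsafe { *recovered }, 30);
//! ```
//!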
//! Situations where a valid pointer *must* be created from just an address, such as baremetal code
//! accessing a memory-mapped interface at a fixed address, are an open question on how to support.
//! These situations *will* still be allowed, but we might require some kind of "I know what I'm
//! doing" annotation to explain the situation to the compiler. It's also possible they need no
//! special attention at all, because they're generally accessing memory outside the scope of
//! "the abstract machine", or already using "I know what I'm doing" annotations like "volatile".
//!
//! Under [Strict Provenance] it is Undefined Behaviour to:
//!
//! * Access memory through a pointer that does not have provenance over that memory.
//!
//! * [`offset`] a pointer to or from an address it doesn't have provenance over.
//! This means it's always UB to offset a pointer derived from something deallocated,
//! even if the offset is 0. Note that a pointer "one past the end" of its provenance
//! is not actually outside its provenance, it just has 0 bytes it can load/store.
//!
//! But it *is* still sound to:
//!
//! * Create an invalid pointer from just an address (see [`ptr::invalid`][]). This can
//! be used for sentinel values like `null` *or* to represent a tagged pointer that will
//! never be dereferenceable. In general, it is always sound for an integer to pretend
//! to be a pointer "for fun" as long as you don't use operations on it which require
//! it to be valid (offset, read, write, etc).
//!
//! * Forge an allocation of size zero at any sufficiently aligned non-null address.
//! i.e. the usual "ZSTs are fake, do what you want" rules apply *but* this only applies
//! for actual forgery (integers cast to pointers). If you borrow some struct's field
//! that *happens* to be zero-sized, the resulting pointer will have provenance tied to
//! that allocation and it will still get invalidated if the allocation gets deallocated.
//! In the future we may introduce an API to make such a forged allocation explicit.
//!
//! * [`wrapping_offset`][] a pointer outside its provenance. This includes invalid pointers
//! which have "no" provenance. Unfortunately there may be practical limits on this for a
//! particular platform, and it's an open question as to how to specify this (if at all).
//! Notably, [CHERI][] relies on a compression scheme that can't handle a
//! pointer getting offset "too far" out of bounds. If this happens, the address
//! returned by `addr` will be the value you expect, but the provenance will get invalidated
//! and using it to read/write will fault. The details of this are architecture-specific
//! and based on alignment, but the buffer on either side of the pointer's range is pretty
//! generous (think kilobytes, not bytes).
//!
//! * Compare arbitrary pointers by address. Addresses *are* just integers and so there is
//! always a coherent answer, even if the pointers are invalid or from different
//! address-spaces/provenances. Of course, comparing addresses from different address-spaces
//! is generally going to be *meaningless*, but so is comparing Kilograms to Meters, and Rust
//! doesn't prevent that either. Similarly, if you get "lucky" and notice that a pointer
//! one-past-the-end is the "same" address as the start of an unrelated allocation, anything
//! you do with that fact is *probably* going to be gibberish. The scope of that gibberish
//! is kept under control by the fact that the two pointers *still* aren't allowed to access
//! the other's allocation (bytes), because they still have different provenance.
//!
//! * Perform pointer tagging tricks. This falls out of [`wrapping_offset`] but is worth
//! mentioning in more detail because of the limitations of [CHERI][]. Low-bit tagging
//! is very robust, and often doesn't even go out of bounds because types ensure
//! size >= align (and over-aligning actually gives CHERI more flexibility). Anything
//! more complex than this rapidly enters "extremely platform-specific" territory as
//! certain things may or may not be allowed based on specific supported operations.
//! For instance, ARM explicitly supports high-bit tagging, and so CHERI on ARM inherits
//! that and should support it.
//!
//!
//! [aliasing]: ../../nomicon/aliasing.html //! [aliasing]: ../../nomicon/aliasing.html
//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer //! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
//! [ub]: ../../reference/behavior-considered-undefined.html //! [ub]: ../../reference/behavior-considered-undefined.html
//! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts //! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
//! [atomic operations]: crate::sync::atomic //! [atomic operations]: crate::sync::atomic
//! [`offset`]: pointer::offset //! [`offset`]: pointer::offset
//! [`wrapping_offset`]: pointer::wrapping_offset
//! [`with_addr`]: pointer::with_addr
//! [`map_addr`]: pointer::map_addr
//! [`addr`]: pointer::addr
//! [`ptr::invalid`]: core::ptr::invalid
//! [miri]: https://github.com/rust-lang/miri
//! [CHERI]: https://www.cl.cam.ac.uk/research/security/ctsrd/cheri/
//! [Strict Provenance]: https://github.com/rust-lang/rust/issues/95228
//! [Stacked Borrows]: https://plv.mpi-sws.org/rustbelt/stacked-borrows/
#![stable(feature = "rust1", since = "1.0.0")] #![stable(feature = "rust1", since = "1.0.0")]
@ -210,7 +467,7 @@ pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")] #[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
#[rustc_diagnostic_item = "ptr_null"] #[rustc_diagnostic_item = "ptr_null"]
pub const fn null<T>() -> *const T { pub const fn null<T>() -> *const T {
0 as *const T invalid(0)
} }
/// Creates a null mutable raw pointer. /// Creates a null mutable raw pointer.
@ -230,7 +487,61 @@ pub const fn null<T>() -> *const T {
#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")] #[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
#[rustc_diagnostic_item = "ptr_null_mut"] #[rustc_diagnostic_item = "ptr_null_mut"]
pub const fn null_mut<T>() -> *mut T { pub const fn null_mut<T>() -> *mut T {
0 as *mut T invalid_mut(0)
}
/// Creates an invalid pointer with the given address.
///
/// This is *currently* equivalent to `addr as *const T` but it expresses the intended semantic
/// more clearly, and may become important under future memory models.
///
/// The module's top-level documentation discusses the precise meaning of an "invalid"
/// pointer but essentially this expresses that the pointer is not associated
/// with any actual allocation and is little more than a usize address in disguise.
///
/// This pointer will have no provenance associated with it and is therefore
/// UB to read/write/offset. This mostly exists to facilitate things
/// like ptr::null and NonNull::dangling which make invalid pointers.
///
/// (Standard "Zero-Sized-Types get to cheat and lie" caveats apply, although it
/// may be desirable to give them their own API just to make that 100% clear.)
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[inline(always)]
#[must_use]
#[rustc_const_stable(feature = "strict_provenance", since = "1.61.0")]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub const fn invalid<T>(addr: usize) -> *const T {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
addr as *const T
}
/// Creates an invalid mutable pointer with the given address.
///
/// This is *currently* equivalent to `addr as *mut T` but it expresses the intended semantic
/// more clearly, and may become important under future memory models.
///
/// The module's top-level documentation discusses the precise meaning of an "invalid"
/// pointer but essentially this expresses that the pointer is not associated
/// with any actual allocation and is little more than a usize address in disguise.
///
/// This pointer will have no provenance associated with it and is therefore
/// UB to read/write/offset. This mostly exists to facilitate things
/// like ptr::null and NonNull::dangling which make invalid pointers.
///
/// (Standard "Zero-Sized-Types get to cheat and lie" caveats apply, although it
/// may be desirable to give them their own API just to make that 100% clear.)
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[inline(always)]
#[must_use]
#[rustc_const_stable(feature = "strict_provenance", since = "1.61.0")]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub const fn invalid_mut<T>(addr: usize) -> *mut T {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
addr as *mut T
} }
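
A short sketch of what `invalid`/`invalid_mut` are for (assuming
`#![feature(strict_provenance)]`): address-only pointers that are never
dereferenced, such as sentinels, and `null` itself:

```rust
#![feature(strict_provenance)]
use core::ptr;

fn main() {
    // A sentinel address that no real, aligned allocation will ever use.
    let sentinel: *mut u8 = ptr::invalid_mut(usize::MAX);
    assert_eq!(sentinel.addr(), usize::MAX);

    // `null()` is now just the invalid pointer with address 0.
    let n: *const u8 = ptr::null();
    assert!(n.is_null() && n.addr() == 0);
    // Reading, writing, or offsetting `sentinel` would be UB: it has no provenance.
}
```
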
/// Forms a raw slice from a pointer and a length. /// Forms a raw slice from a pointer and a length.
@ -1110,6 +1421,8 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
unchecked_shl, unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub, unchecked_shl, unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
}; };
let addr = p.addr();
/// Calculate multiplicative modular inverse of `x` modulo `m`. /// Calculate multiplicative modular inverse of `x` modulo `m`.
/// ///
/// This implementation is tailored for `align_offset` and has following preconditions: /// This implementation is tailored for `align_offset` and has following preconditions:
@ -1170,13 +1483,10 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
// //
// which distributes operations around the load-bearing, but pessimizing `and` sufficiently // which distributes operations around the load-bearing, but pessimizing `and` sufficiently
// for LLVM to be able to utilize the various optimizations it knows about. // for LLVM to be able to utilize the various optimizations it knows about.
return wrapping_sub( return wrapping_sub(wrapping_add(addr, a_minus_one) & wrapping_sub(0, a), addr);
wrapping_add(p as usize, a_minus_one) & wrapping_sub(0, a),
p as usize,
);
} }
let pmoda = p as usize & a_minus_one; let pmoda = addr & a_minus_one;
if pmoda == 0 { if pmoda == 0 {
// Already aligned. Yay! // Already aligned. Yay!
return 0; return 0;
@ -1193,7 +1503,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
let gcd = unsafe { unchecked_shl(1usize, gcdpow) }; let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
// SAFETY: gcd is always greater or equal to 1. // SAFETY: gcd is always greater or equal to 1.
if p as usize & unsafe { unchecked_sub(gcd, 1) } == 0 { if addr & unsafe { unchecked_sub(gcd, 1) } == 0 {
// This branch solves for the following linear congruence equation: // This branch solves for the following linear congruence equation:
// //
// ` p + so = 0 mod a ` // ` p + so = 0 mod a `
@ -1347,6 +1657,11 @@ pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
hashee.hash(into); hashee.hash(into);
} }
// FIXME(strict_provenance_magic): function pointers have buggy codegen that
// necessitates casting to a usize to get the backend to do the right thing.
// For now I will break AVR to silence *a billion* lints. We should probably
// have a proper "opaque function pointer type" to handle this kind of thing.
// Impls for function pointers // Impls for function pointers
macro_rules! fnptr_impls_safety_abi { macro_rules! fnptr_impls_safety_abi {
($FnTy: ty, $($Arg: ident),*) => { ($FnTy: ty, $($Arg: ident),*) => {


@ -154,6 +154,79 @@ impl<T: ?Sized> *mut T {
bits as Self bits as Self
} }
/// Gets the "address" portion of the pointer.
///
/// This is equivalent to `self as usize`, which semantically discards
/// *provenance* and *address-space* information. To properly restore that information,
/// use [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
///
/// On most platforms this will produce a value with the same bytes as the original
/// pointer, because all the bytes are dedicated to describing the address.
/// Platforms which need to store additional information in the pointer may
/// perform a change of representation to produce a value containing only the address
/// portion of the pointer. What that means is up to the platform to define.
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
T: Sized,
{
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
self as usize
}
/// Creates a new pointer with the given address.
///
/// This performs the same operation as an `addr as ptr` cast, but copies
/// the *address-space* and *provenance* of `self` to the new pointer.
/// This allows us to dynamically preserve and propagate this important
/// information in a way that is otherwise impossible with a unary cast.
///
/// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
/// `self` to the given address, and therefore has all the same capabilities and restrictions.
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn with_addr(self, addr: usize) -> Self
where
T: Sized,
{
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the meantime, this operation is defined to be "as if" it were
// a wrapping_offset, so we can emulate it as such. This should properly
// restore pointer provenance even under today's compiler.
let self_addr = self.addr() as isize;
let dest_addr = addr as isize;
let offset = dest_addr.wrapping_sub(self_addr);
// This is the canonical desugaring of this operation
self.cast::<u8>().wrapping_offset(offset).cast::<T>()
}
/// Creates a new pointer by mapping `self`'s address to a new one.
///
/// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
///
/// This API and its claimed semantics are part of the Strict Provenance experiment,
/// see the [module documentation][crate::ptr] for details.
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
where
T: Sized,
{
self.with_addr(f(self.addr()))
}
/// Decompose a (possibly wide) pointer into its address and metadata components. /// Decompose a (possibly wide) pointer into its address and metadata components.
/// ///
/// The pointer can be later reconstructed with [`from_raw_parts_mut`]. /// The pointer can be later reconstructed with [`from_raw_parts_mut`].
@ -1276,7 +1349,7 @@ impl<T> *mut [T] {
/// use std::ptr; /// use std::ptr;
/// ///
/// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3); /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
/// assert_eq!(slice.as_mut_ptr(), 0 as *mut i8); /// assert_eq!(slice.as_mut_ptr(), ptr::null_mut());
/// ``` /// ```
#[inline(always)] #[inline(always)]
#[unstable(feature = "slice_ptr_get", issue = "74265")] #[unstable(feature = "slice_ptr_get", issue = "74265")]


@ -90,7 +90,7 @@ impl<T: Sized> NonNull<T> {
// to a *mut T. Therefore, `ptr` is not null and the conditions for // to a *mut T. Therefore, `ptr` is not null and the conditions for
// calling new_unchecked() are respected. // calling new_unchecked() are respected.
unsafe { unsafe {
let ptr = mem::align_of::<T>() as *mut T; let ptr = crate::ptr::invalid_mut::<T>(mem::align_of::<T>());
NonNull::new_unchecked(ptr) NonNull::new_unchecked(ptr)
} }
} }
@ -469,7 +469,7 @@ impl<T> NonNull<[T]> {
/// use std::ptr::NonNull; /// use std::ptr::NonNull;
/// ///
/// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3); /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
/// assert_eq!(slice.as_non_null_ptr(), NonNull::new(1 as *mut i8).unwrap()); /// assert_eq!(slice.as_non_null_ptr(), NonNull::<i8>::dangling());
/// ``` /// ```
#[inline] #[inline]
#[must_use] #[must_use]
@ -489,7 +489,7 @@ impl<T> NonNull<[T]> {
/// use std::ptr::NonNull; /// use std::ptr::NonNull;
/// ///
/// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3); /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
/// assert_eq!(slice.as_mut_ptr(), 1 as *mut i8); /// assert_eq!(slice.as_mut_ptr(), NonNull::<i8>::dangling().as_ptr());
/// ``` /// ```
#[inline] #[inline]
#[must_use] #[must_use]


@ -73,7 +73,7 @@ impl<T: Sized> Unique<T> {
pub const fn dangling() -> Self { pub const fn dangling() -> Self {
// SAFETY: mem::align_of() returns a valid, non-null pointer. The // SAFETY: mem::align_of() returns a valid, non-null pointer. The
// conditions to call new_unchecked() are thus respected. // conditions to call new_unchecked() are thus respected.
unsafe { Unique::new_unchecked(mem::align_of::<T>() as *mut T) } unsafe { Unique::new_unchecked(crate::ptr::invalid_mut::<T>(mem::align_of::<T>())) }
} }
} }


@ -294,7 +294,7 @@ fn is_ascii(s: &[u8]) -> bool {
// Paranoia check about alignment, since we're about to do a bunch of // Paranoia check about alignment, since we're about to do a bunch of
// unaligned loads. In practice this should be impossible barring a bug in // unaligned loads. In practice this should be impossible barring a bug in
// `align_offset` though. // `align_offset` though.
debug_assert_eq!((word_ptr as usize) % mem::align_of::<usize>(), 0); debug_assert_eq!(word_ptr.addr() % mem::align_of::<usize>(), 0);
// Read subsequent words until the last aligned word, excluding the last // Read subsequent words until the last aligned word, excluding the last
// aligned word by itself to be done in tail check later, to ensure that // aligned word by itself to be done in tail check later, to ensure that
@ -302,9 +302,9 @@ fn is_ascii(s: &[u8]) -> bool {
while byte_pos < len - USIZE_SIZE { while byte_pos < len - USIZE_SIZE {
debug_assert!( debug_assert!(
// Sanity check that the read is in bounds // Sanity check that the read is in bounds
(word_ptr as usize + USIZE_SIZE) <= (start.wrapping_add(len) as usize) && (word_ptr.addr() + USIZE_SIZE) <= start.addr().wrapping_add(len) &&
// And that our assumptions about `byte_pos` hold. // And that our assumptions about `byte_pos` hold.
(word_ptr as usize) - (start as usize) == byte_pos (word_ptr.addr() - start.addr()) == byte_pos
); );
// SAFETY: We know `word_ptr` is properly aligned (because of // SAFETY: We know `word_ptr` is properly aligned (because of


@ -20,13 +20,13 @@ macro_rules! len {
if size == 0 { if size == 0 {
// This _cannot_ use `unchecked_sub` because we depend on wrapping // This _cannot_ use `unchecked_sub` because we depend on wrapping
// to represent the length of long ZST slice iterators. // to represent the length of long ZST slice iterators.
($self.end as usize).wrapping_sub(start.as_ptr() as usize) $self.end.addr().wrapping_sub(start.as_ptr().addr())
} else { } else {
// We know that `start <= end`, so can do better than `offset_from`, // We know that `start <= end`, so can do better than `offset_from`,
// which needs to deal in signed. By setting appropriate flags here // which needs to deal in signed. By setting appropriate flags here
// we can tell LLVM this, which helps it remove bounds checks. // we can tell LLVM this, which helps it remove bounds checks.
// SAFETY: By the type invariant, `start <= end` // SAFETY: By the type invariant, `start <= end`
let diff = unsafe { unchecked_sub($self.end as usize, start.as_ptr() as usize) }; let diff = unsafe { unchecked_sub($self.end.addr(), start.as_ptr().addr()) };
// By also telling LLVM that the pointers are apart by an exact // By also telling LLVM that the pointers are apart by an exact
// multiple of the type size, it can optimize `len() == 0` down to // multiple of the type size, it can optimize `len() == 0` down to
// `start == end` instead of `(end - start) < size`. // `start == end` instead of `(end - start) < size`.

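A minimal sketch of the zero-size branch above (standalone; the helper name is
illustrative): for ZSTs the iterator's `end` encodes the remaining length
purely as an address, so the length is recovered by wrapping address subtraction:

```rust
#![feature(strict_provenance)]

fn zst_len<T>(start: *const T, end: *const T) -> usize {
    assert_eq!(core::mem::size_of::<T>(), 0);
    // Wrapping, because long ZST iterators rely on address wraparound.
    end.addr().wrapping_sub(start.addr())
}

fn main() {
    let xs = [(); 5];
    let start = xs.as_ptr();
    // For ZSTs the iterator advances the *address*, not real bytes.
    let end = start.map_addr(|a| a.wrapping_add(xs.len()));
    assert_eq!(zst_len(start, end), 5);
}
```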

@ -269,7 +269,9 @@ where
// Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive). // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
fn width<T>(l: *mut T, r: *mut T) -> usize { fn width<T>(l: *mut T, r: *mut T) -> usize {
assert!(mem::size_of::<T>() > 0); assert!(mem::size_of::<T>() > 0);
(r as usize - l as usize) / mem::size_of::<T>() // FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
(r.addr() - l.addr()) / mem::size_of::<T>()
} }
loop { loop {


@ -330,7 +330,7 @@ impl Backtrace {
frame: RawFrame::Actual(frame.clone()), frame: RawFrame::Actual(frame.clone()),
symbols: Vec::new(), symbols: Vec::new(),
}); });
if frame.symbol_address() as usize == ip && actual_start.is_none() { if frame.symbol_address().addr() == ip && actual_start.is_none() {
actual_start = Some(frames.len()); actual_start = Some(frames.len());
} }
true true
@ -493,7 +493,7 @@ impl RawFrame {
match self { match self {
RawFrame::Actual(frame) => frame.ip(), RawFrame::Actual(frame) => frame.ip(),
#[cfg(test)] #[cfg(test)]
RawFrame::Fake => 1 as *mut c_void, RawFrame::Fake => crate::ptr::invalid_mut(1),
} }
} }
} }


@ -106,7 +106,7 @@ use super::{Custom, ErrorData, ErrorKind, SimpleMessage};
use alloc::boxed::Box; use alloc::boxed::Box;
use core::marker::PhantomData; use core::marker::PhantomData;
use core::mem::{align_of, size_of}; use core::mem::{align_of, size_of};
use core::ptr::NonNull; use core::ptr::{self, NonNull};
// The 2 least-significant bits are used as tag. // The 2 least-significant bits are used as tag.
const TAG_MASK: usize = 0b11; const TAG_MASK: usize = 0b11;
@ -136,7 +136,7 @@ impl Repr {
let p = Box::into_raw(b).cast::<u8>(); let p = Box::into_raw(b).cast::<u8>();
// Should only be possible if an allocator handed out a pointer with // Should only be possible if an allocator handed out a pointer with
// wrong alignment. // wrong alignment.
debug_assert_eq!((p as usize & TAG_MASK), 0); debug_assert_eq!(p.addr() & TAG_MASK, 0);
// Note: We know `TAG_CUSTOM <= size_of::<Custom>()` (static_assert at // Note: We know `TAG_CUSTOM <= size_of::<Custom>()` (static_assert at
// end of file), and both the start and end of the expression must be // end of file), and both the start and end of the expression must be
// valid without address space wraparound due to `Box`'s semantics. // valid without address space wraparound due to `Box`'s semantics.
@ -166,7 +166,7 @@ impl Repr {
pub(super) fn new_os(code: i32) -> Self { pub(super) fn new_os(code: i32) -> Self {
let utagged = ((code as usize) << 32) | TAG_OS; let utagged = ((code as usize) << 32) | TAG_OS;
// Safety: `TAG_OS` is not zero, so the result of the `|` is not 0. // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0.
let res = Self(unsafe { NonNull::new_unchecked(utagged as *mut ()) }, PhantomData); let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
// quickly smoke-check we encoded the right thing (This generally will // quickly smoke-check we encoded the right thing (This generally will
// only run in libstd's tests, unless the user uses -Zbuild-std) // only run in libstd's tests, unless the user uses -Zbuild-std)
debug_assert!( debug_assert!(
@ -180,7 +180,7 @@ impl Repr {
pub(super) fn new_simple(kind: ErrorKind) -> Self { pub(super) fn new_simple(kind: ErrorKind) -> Self {
let utagged = ((kind as usize) << 32) | TAG_SIMPLE; let utagged = ((kind as usize) << 32) | TAG_SIMPLE;
// Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0. // Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0.
let res = Self(unsafe { NonNull::new_unchecked(utagged as *mut ()) }, PhantomData); let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
// quickly smoke-check we encoded the right thing (This generally will // quickly smoke-check we encoded the right thing (This generally will
// only run in libstd's tests, unless the user uses -Zbuild-std) // only run in libstd's tests, unless the user uses -Zbuild-std)
debug_assert!( debug_assert!(
@ -238,7 +238,7 @@ unsafe fn decode_repr<C, F>(ptr: NonNull<()>, make_custom: F) -> ErrorData<C>
where where
F: FnOnce(*mut Custom) -> C, F: FnOnce(*mut Custom) -> C,
{ {
let bits = ptr.as_ptr() as usize; let bits = ptr.as_ptr().addr();
match bits & TAG_MASK { match bits & TAG_MASK {
TAG_OS => { TAG_OS => {
let code = ((bits as i64) >> 32) as i32; let code = ((bits as i64) >> 32) as i32;

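A hedged sketch of the tagging scheme these hunks touch (constants and layout
below are illustrative, and the 32-bit shift assumes a 64-bit target): small
variants are packed into a `NonNull<()>` built with `ptr::invalid_mut`, and
decoded from `.addr()`:

```rust
#![feature(strict_provenance)]
use core::ptr::{self, NonNull};

const TAG_MASK: usize = 0b11;
const TAG_OS: usize = 0b01;

fn encode_os(code: i32) -> NonNull<()> {
    let tagged = ((code as usize) << 32) | TAG_OS;
    // Safety: TAG_OS is non-zero, so `tagged` is non-zero.
    unsafe { NonNull::new_unchecked(ptr::invalid_mut(tagged)) }
}

fn decode_os(ptr: NonNull<()>) -> Option<i32> {
    let bits = ptr.as_ptr().addr();
    ((bits & TAG_MASK) == TAG_OS).then(|| ((bits as i64) >> 32) as i32)
}

fn main() {
    assert_eq!(decode_os(encode_os(17)), Some(17));
    assert_eq!(decode_os(encode_os(-3)), Some(-3));
}
```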

@ -275,6 +275,7 @@
#![feature(extend_one)] #![feature(extend_one)]
#![feature(float_minimum_maximum)] #![feature(float_minimum_maximum)]
#![feature(format_args_nl)] #![feature(format_args_nl)]
#![feature(strict_provenance)]
#![feature(get_mut_unchecked)] #![feature(get_mut_unchecked)]
#![feature(hashmap_internals)] #![feature(hashmap_internals)]
#![feature(int_error_internals)] #![feature(int_error_internals)]


@ -17,8 +17,8 @@ mod libc {
fn sun_path_offset(addr: &libc::sockaddr_un) -> usize { fn sun_path_offset(addr: &libc::sockaddr_un) -> usize {
// Work with an actual instance of the type since using a null pointer is UB // Work with an actual instance of the type since using a null pointer is UB
let base = addr as *const _ as usize; let base = (addr as *const libc::sockaddr_un).addr();
let path = &addr.sun_path as *const _ as usize; let path = (&addr.sun_path as *const libc::c_char).addr();
path - base path - base
} }
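
The same pattern in a standalone form (the struct below is made up, standing
in for `libc::sockaddr_un`): a field offset is computed as the difference of
two `addr()` values taken from a real instance, since projecting through a
null pointer would be UB:

```rust
#![feature(strict_provenance)]

#[repr(C)]
struct SockAddrLike {
    family: u16,
    path: [u8; 108],
}

fn path_offset(addr: &SockAddrLike) -> usize {
    let base = (addr as *const SockAddrLike).addr();
    let path = (&addr.path as *const [u8; 108]).addr();
    path - base
}

fn main() {
    let a = SockAddrLike { family: 0, path: [0; 108] };
    // `path` starts right after the 2-byte `family` field in this layout.
    assert_eq!(path_offset(&a), 2);
}
```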


@ -9,6 +9,7 @@ use crate::fs;
use crate::io; use crate::io;
use crate::marker::PhantomData; use crate::marker::PhantomData;
use crate::mem::forget; use crate::mem::forget;
use crate::ptr;
use crate::sys::c; use crate::sys::c;
use crate::sys::cvt; use crate::sys::cvt;
use crate::sys_common::{AsInner, FromInner, IntoInner}; use crate::sys_common::{AsInner, FromInner, IntoInner};
@ -182,7 +183,7 @@ impl OwnedHandle {
return unsafe { Ok(Self::from_raw_handle(handle)) }; return unsafe { Ok(Self::from_raw_handle(handle)) };
} }
let mut ret = 0 as c::HANDLE; let mut ret = ptr::null_mut();
cvt(unsafe { cvt(unsafe {
let cur_proc = c::GetCurrentProcess(); let cur_proc = c::GetCurrentProcess();
c::DuplicateHandle( c::DuplicateHandle(


@ -129,6 +129,7 @@ impl OwnedSocket {
} }
} }
// FIXME(strict_provenance_magic): we defined RawSocket to be a u64 ;-;
#[cfg(not(target_vendor = "uwp"))] #[cfg(not(target_vendor = "uwp"))]
pub(crate) fn set_no_inherit(&self) -> io::Result<()> { pub(crate) fn set_no_inherit(&self) -> io::Result<()> {
cvt(unsafe { cvt(unsafe {


@ -1449,8 +1449,8 @@ impl PathBuf {
}; };
// truncate until right after the file stem // truncate until right after the file stem
let end_file_stem = file_stem[file_stem.len()..].as_ptr() as usize; let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr();
let start = os_str_as_u8_slice(&self.inner).as_ptr() as usize; let start = os_str_as_u8_slice(&self.inner).as_ptr().addr();
let v = self.as_mut_vec(); let v = self.as_mut_vec();
v.truncate(end_file_stem.wrapping_sub(start)); v.truncate(end_file_stem.wrapping_sub(start));


@ -91,9 +91,12 @@ use crate::cell::Cell;
use crate::fmt; use crate::fmt;
use crate::marker; use crate::marker;
use crate::panic::{RefUnwindSafe, UnwindSafe}; use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use crate::ptr;
use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use crate::thread::{self, Thread}; use crate::thread::{self, Thread};
type Masked = ();
/// A synchronization primitive which can be used to run a one-time global /// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related /// initialization. Useful for one-time initialization for FFI or related
/// functionality. This type can only be constructed with [`Once::new()`]. /// functionality. This type can only be constructed with [`Once::new()`].
@ -113,7 +116,7 @@ use crate::thread::{self, Thread};
pub struct Once { pub struct Once {
// `state_and_queue` is actually a pointer to a `Waiter` with extra state // `state_and_queue` is actually a pointer to a `Waiter` with extra state
// bits, so we add the `PhantomData` appropriately. // bits, so we add the `PhantomData` appropriately.
state_and_queue: AtomicUsize, state_and_queue: AtomicPtr<Masked>,
_marker: marker::PhantomData<*const Waiter>, _marker: marker::PhantomData<*const Waiter>,
} }
@ -136,7 +139,7 @@ impl RefUnwindSafe for Once {}
#[derive(Debug)] #[derive(Debug)]
pub struct OnceState { pub struct OnceState {
poisoned: bool, poisoned: bool,
set_state_on_drop_to: Cell<usize>, set_state_on_drop_to: Cell<*mut Masked>,
} }
/// Initialization value for static [`Once`] values. /// Initialization value for static [`Once`] values.
@ -184,8 +187,8 @@ struct Waiter {
// Every node is a struct on the stack of a waiting thread. // Every node is a struct on the stack of a waiting thread.
// Will wake up the waiters when it gets dropped, i.e. also on panic. // Will wake up the waiters when it gets dropped, i.e. also on panic.
struct WaiterQueue<'a> { struct WaiterQueue<'a> {
state_and_queue: &'a AtomicUsize, state_and_queue: &'a AtomicPtr<Masked>,
set_state_on_drop_to: usize, set_state_on_drop_to: *mut Masked,
} }
impl Once { impl Once {
@ -195,7 +198,10 @@ impl Once {
#[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
#[must_use] #[must_use]
pub const fn new() -> Once { pub const fn new() -> Once {
Once { state_and_queue: AtomicUsize::new(INCOMPLETE), _marker: marker::PhantomData } Once {
state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)),
_marker: marker::PhantomData,
}
} }
/// Performs an initialization routine once and only once. The given closure /// Performs an initialization routine once and only once. The given closure
@ -376,7 +382,7 @@ impl Once {
// operations visible to us, and, this being a fast path, weaker // operations visible to us, and, this being a fast path, weaker
// ordering helps with performance. This `Acquire` synchronizes with // ordering helps with performance. This `Acquire` synchronizes with
// `Release` operations on the slow path. // `Release` operations on the slow path.
self.state_and_queue.load(Ordering::Acquire) == COMPLETE self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
} }
// This is a non-generic function to reduce the monomorphization cost of // This is a non-generic function to reduce the monomorphization cost of
@ -395,7 +401,7 @@ impl Once {
fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) { fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
loop { loop {
match state_and_queue { match state_and_queue.addr() {
COMPLETE => break, COMPLETE => break,
POISONED if !ignore_poisoning => { POISONED if !ignore_poisoning => {
// Panic to propagate the poison. // Panic to propagate the poison.
@ -405,7 +411,7 @@ impl Once {
// Try to register this thread as the one RUNNING. // Try to register this thread as the one RUNNING.
let exchange_result = self.state_and_queue.compare_exchange( let exchange_result = self.state_and_queue.compare_exchange(
state_and_queue, state_and_queue,
RUNNING, ptr::invalid_mut(RUNNING),
Ordering::Acquire, Ordering::Acquire,
Ordering::Acquire, Ordering::Acquire,
); );
@ -417,13 +423,13 @@ impl Once {
// wake them up on drop. // wake them up on drop.
let mut waiter_queue = WaiterQueue { let mut waiter_queue = WaiterQueue {
state_and_queue: &self.state_and_queue, state_and_queue: &self.state_and_queue,
set_state_on_drop_to: POISONED, set_state_on_drop_to: ptr::invalid_mut(POISONED),
}; };
// Run the initialization function, letting it know if we're // Run the initialization function, letting it know if we're
// poisoned or not. // poisoned or not.
let init_state = OnceState { let init_state = OnceState {
poisoned: state_and_queue == POISONED, poisoned: state_and_queue.addr() == POISONED,
set_state_on_drop_to: Cell::new(COMPLETE), set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
}; };
init(&init_state); init(&init_state);
waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get(); waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
@ -432,7 +438,7 @@ impl Once {
_ => { _ => {
// All other values must be RUNNING with possibly a // All other values must be RUNNING with possibly a
// pointer to the waiter queue in the more significant bits. // pointer to the waiter queue in the more significant bits.
assert!(state_and_queue & STATE_MASK == RUNNING); assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
wait(&self.state_and_queue, state_and_queue); wait(&self.state_and_queue, state_and_queue);
state_and_queue = self.state_and_queue.load(Ordering::Acquire); state_and_queue = self.state_and_queue.load(Ordering::Acquire);
} }
@ -441,13 +447,13 @@ impl Once {
} }
} }
fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) { fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
// Note: the following code was carefully written to avoid creating a // Note: the following code was carefully written to avoid creating a
// mutable reference to `node` that gets aliased. // mutable reference to `node` that gets aliased.
loop { loop {
// Don't queue this thread if the status is no longer running, // Don't queue this thread if the status is no longer running,
// otherwise we will not be woken up. // otherwise we will not be woken up.
if current_state & STATE_MASK != RUNNING { if current_state.addr() & STATE_MASK != RUNNING {
return; return;
} }
@ -455,15 +461,15 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
let node = Waiter { let node = Waiter {
thread: Cell::new(Some(thread::current())), thread: Cell::new(Some(thread::current())),
signaled: AtomicBool::new(false), signaled: AtomicBool::new(false),
next: (current_state & !STATE_MASK) as *const Waiter, next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
}; };
let me = &node as *const Waiter as usize; let me = &node as *const Waiter as *const Masked as *mut Masked;
// Try to slide in the node at the head of the linked list, making sure // Try to slide in the node at the head of the linked list, making sure
// that another thread didn't just replace the head of the linked list. // that another thread didn't just replace the head of the linked list.
let exchange_result = state_and_queue.compare_exchange( let exchange_result = state_and_queue.compare_exchange(
current_state, current_state,
me | RUNNING, me.with_addr(me.addr() | RUNNING),
Ordering::Release, Ordering::Release,
Ordering::Relaxed, Ordering::Relaxed,
); );
@ -502,7 +508,7 @@ impl Drop for WaiterQueue<'_> {
self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
// We should only ever see an old state which was RUNNING. // We should only ever see an old state which was RUNNING.
assert_eq!(state_and_queue & STATE_MASK, RUNNING); assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
// Walk the entire linked list of waiters and wake them up (in lifo // Walk the entire linked list of waiters and wake them up (in lifo
// order, last to register is first to wake up). // order, last to register is first to wake up).
@ -511,7 +517,8 @@ impl Drop for WaiterQueue<'_> {
// free `node` if there happens to be has a spurious wakeup. // free `node` if there happens to be has a spurious wakeup.
// So we have to take out the `thread` field and copy the pointer to // So we have to take out the `thread` field and copy the pointer to
// `next` first. // `next` first.
let mut queue = (state_and_queue & !STATE_MASK) as *const Waiter; let mut queue =
state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
while !queue.is_null() { while !queue.is_null() {
let next = (*queue).next; let next = (*queue).next;
let thread = (*queue).thread.take().unwrap(); let thread = (*queue).thread.take().unwrap();
@ -568,6 +575,6 @@ impl OnceState {
/// Poison the associated [`Once`] without explicitly panicking. /// Poison the associated [`Once`] without explicitly panicking.
// NOTE: This is currently only exposed for the `lazy` module // NOTE: This is currently only exposed for the `lazy` module
pub(crate) fn poison(&self) { pub(crate) fn poison(&self) {
self.set_state_on_drop_to.set(POISONED); self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
} }
} }
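Taken together, the `Once` changes amount to a small tagged-pointer scheme: pure states are stored as provenance-free pointers built with `ptr::invalid_mut`, while a real waiter pointer carries its state in the low bits. A rough sketch of the encode/decode half of that scheme, with made-up constants and a simplified node type, assuming the `addr`/`with_addr`/`map_addr` methods from this series; this is an illustration, not the std implementation:

```rust
use core::sync::atomic::{AtomicPtr, Ordering};

// Made-up stand-ins for the state constants used by `Once`.
const STATE_MASK: usize = 0b11;
const RUNNING: usize = 0b10;

type Masked = ();

// `Node` is pointer-aligned, so its low two bits are free to hold the tag.
struct Node {
    next: *const Node,
}

// Read the low state bits; the pointer itself (and its provenance) is untouched.
fn state(p: *mut Masked) -> usize {
    p.addr() & STATE_MASK
}

// Recover the queue pointer: keep the original provenance, drop the tag bits.
fn queue(p: *mut Masked) -> *const Node {
    p.with_addr(p.addr() & !STATE_MASK) as *const Node
}

// Tag a node pointer with RUNNING before publishing it; the tagged pointer
// still remembers which allocation it came from.
fn publish(slot: &AtomicPtr<Masked>, node: &Node) {
    let tagged = (node as *const Node as *mut Masked).map_addr(|a| a | RUNNING);
    slot.store(tagged, Ordering::Release);
}
```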

View file

@ -9,7 +9,7 @@ pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
haystack.len(), haystack.len(),
) )
}; };
if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) } if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
} }
pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> { pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
@ -26,7 +26,9 @@ pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
haystack.len(), haystack.len(),
) )
}; };
if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) } // FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
} }
#[cfg(not(target_os = "linux"))] #[cfg(not(target_os = "linux"))]
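The FIXME points at `offset_from`, which expresses the same subtraction as a pointer operation rather than two address reads; whether it is appropriate here is exactly what the comment leaves open. A hedged sketch of that alternative (not what the patch adopts), valid only because `memchr`'s non-null result is known to point into `haystack`:

```rust
/// Index of `p` within `haystack`, using the `offset_from` spelling the
/// FIXME mentions.
///
/// SAFETY: the caller must guarantee that `p` is derived from `haystack`
/// and lies within it (or one past the end).
unsafe fn index_of(p: *const u8, haystack: &[u8]) -> usize {
    unsafe { p.offset_from(haystack.as_ptr()) as usize }
}
```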

View file

@ -505,9 +505,8 @@ pub mod guard {
#[cfg(target_os = "macos")] #[cfg(target_os = "macos")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> { unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let th = libc::pthread_self(); let th = libc::pthread_self();
let stackaddr = let stackptr = libc::pthread_get_stackaddr_np(th);
libc::pthread_get_stackaddr_np(th) as usize - libc::pthread_get_stacksize_np(th); Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
Some(stackaddr as *mut libc::c_void)
} }
#[cfg(target_os = "openbsd")] #[cfg(target_os = "openbsd")]
@ -515,14 +514,15 @@ pub mod guard {
let mut current_stack: libc::stack_t = crate::mem::zeroed(); let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0); assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
let stack_ptr = current_stack.ss_sp;
let stackaddr = if libc::pthread_main_np() == 1 { let stackaddr = if libc::pthread_main_np() == 1 {
// main thread // main thread
current_stack.ss_sp as usize - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed) stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
} else { } else {
// new thread // new thread
current_stack.ss_sp as usize - current_stack.ss_size stack_ptr.addr() - current_stack.ss_size
}; };
Some(stackaddr as *mut libc::c_void) Some(stack_ptr.with_addr(stackaddr))
} }
#[cfg(any( #[cfg(any(
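Both hunks above follow the same recipe: do the arithmetic on `addr()`, then reattach the result to the pointer you started from with `with_addr` or `map_addr`, so the stack pointer returned by the platform keeps its provenance. A generic sketch of the two spellings, assuming those methods are available; `base` stands in for whatever pointer the platform API returned:

```rust
/// Move `base` down by `size` bytes in one step; the result keeps the
/// provenance of `base`, unlike `(base as usize - size) as *mut u8`.
fn step_down(base: *mut u8, size: usize) -> *mut u8 {
    base.map_addr(|addr| addr - size)
}

/// The two-step spelling used when the new address is computed separately.
fn step_down_explicit(base: *mut u8, size: usize) -> *mut u8 {
    let addr = base.addr() - size;
    base.with_addr(addr)
}
```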
@ -557,7 +557,8 @@ pub mod guard {
unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> { unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed); let page_size = PAGE_SIZE.load(Ordering::Relaxed);
assert!(page_size != 0); assert!(page_size != 0);
let stackaddr = get_stack_start()?; let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
// Ensure stackaddr is page aligned! A parent process might // Ensure stackaddr is page aligned! A parent process might
// have reset RLIMIT_STACK to be non-page aligned. The // have reset RLIMIT_STACK to be non-page aligned. The
@ -565,11 +566,11 @@ pub mod guard {
// stackaddr < stackaddr + stacksize, so if stackaddr is not // stackaddr < stackaddr + stacksize, so if stackaddr is not
// page-aligned, calculate the fix such that stackaddr < // page-aligned, calculate the fix such that stackaddr <
// new_page_aligned_stackaddr < stackaddr + stacksize // new_page_aligned_stackaddr < stackaddr + stacksize
let remainder = (stackaddr as usize) % page_size; let remainder = stackaddr % page_size;
Some(if remainder == 0 { Some(if remainder == 0 {
stackaddr stackptr
} else { } else {
((stackaddr as usize) + page_size - remainder) as *mut libc::c_void stackptr.with_addr(stackaddr + page_size - remainder)
}) })
} }
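Rounding a pointer up to the next page boundary falls out of the same recipe; a small sketch that mirrors the remainder computation above (overflow is ignored, as it is in the original):

```rust
/// Round `p` up to the next multiple of `page_size`, keeping `p`'s provenance.
fn page_align_up(p: *mut u8, page_size: usize) -> *mut u8 {
    let remainder = p.addr() % page_size;
    if remainder == 0 { p } else { p.with_addr(p.addr() + page_size - remainder) }
}
```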
@ -588,8 +589,8 @@ pub mod guard {
// Instead, we'll just note where we expect rlimit to start // Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and // faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work. // trust that the kernel's own stack guard will work.
let stackaddr = get_stack_start_aligned()?; let stackptr = get_stack_start_aligned()?;
let stackaddr = stackaddr as usize; let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr) Some(stackaddr - page_size..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) { } else if cfg!(all(target_os = "linux", target_env = "musl")) {
// For the main thread, the musl's pthread_attr_getstack // For the main thread, the musl's pthread_attr_getstack
@ -602,8 +603,8 @@ pub mod guard {
// at the bottom. If we try to remap the bottom of the stack // at the bottom. If we try to remap the bottom of the stack
// ourselves, FreeBSD's guard page moves upwards. So we'll just use // ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page. // the builtin guard page.
let stackaddr = get_stack_start_aligned()?; let stackptr = get_stack_start_aligned()?;
let guardaddr = stackaddr as usize; let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled // Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl, but there are // by the security.bsd.stack_guard_page sysctl, but there are
// few reasons to change it from the default. The default value has // few reasons to change it from the default. The default value has
@ -620,25 +621,25 @@ pub mod guard {
// than the initial mmap() used, so we mmap() here with // than the initial mmap() used, so we mmap() here with
// read/write permissions and only then mprotect() it to // read/write permissions and only then mprotect() it to
// no permissions at all. See issue #50313. // no permissions at all. See issue #50313.
let stackaddr = get_stack_start_aligned()?; let stackptr = get_stack_start_aligned()?;
let result = mmap( let result = mmap(
stackaddr, stackptr,
page_size, page_size,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1, -1,
0, 0,
); );
if result != stackaddr || result == MAP_FAILED { if result != stackptr || result == MAP_FAILED {
panic!("failed to allocate a guard page: {}", io::Error::last_os_error()); panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
} }
let result = mprotect(stackaddr, page_size, PROT_NONE); let result = mprotect(stackptr, page_size, PROT_NONE);
if result != 0 { if result != 0 {
panic!("failed to protect the guard page: {}", io::Error::last_os_error()); panic!("failed to protect the guard page: {}", io::Error::last_os_error());
} }
let guardaddr = stackaddr as usize; let guardaddr = stackptr.addr();
Some(guardaddr..guardaddr + page_size) Some(guardaddr..guardaddr + page_size)
} }
@ -646,7 +647,8 @@ pub mod guard {
#[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))] #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
pub unsafe fn current() -> Option<Guard> { pub unsafe fn current() -> Option<Guard> {
let stackaddr = get_stack_start()? as usize; let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr) Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
} }
@ -679,11 +681,11 @@ pub mod guard {
panic!("there is no guard page"); panic!("there is no guard page");
} }
} }
let mut stackaddr = crate::ptr::null_mut(); let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
let mut size = 0; let mut size = 0;
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut size), 0); assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
let stackaddr = stackaddr as usize; let stackaddr = stackptr.addr();
ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) { ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) {
Some(stackaddr - guardsize..stackaddr) Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) { } else if cfg!(all(target_os = "linux", target_env = "musl")) {

View file

@ -159,7 +159,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
// Create a correctly aligned pointer offset from the start of the allocated block, // Create a correctly aligned pointer offset from the start of the allocated block,
// and write a header before it. // and write a header before it.
let offset = layout.align() - (ptr as usize & (layout.align() - 1)); let offset = layout.align() - (ptr.addr() & (layout.align() - 1));
// SAFETY: `MIN_ALIGN` <= `offset` <= `layout.align()` and the size of the allocated // SAFETY: `MIN_ALIGN` <= `offset` <= `layout.align()` and the size of the allocated
// block is `layout.align() + layout.size()`. `aligned` will thus be a correctly aligned // block is `layout.align() + layout.size()`. `aligned` will thus be a correctly aligned
// pointer inside the allocated block with at least `layout.size()` bytes after it and at // pointer inside the allocated block with at least `layout.size()` bytes after it and at
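The misalignment test is just a mask on the low bits of the address; the standard library's `align_offset` computes the complementary forward distance without any manual address arithmetic. A sketch of both spellings for a power-of-two `align` (neither is the allocator code itself):

```rust
/// Bytes by which `p` is past the previous `align` boundary.
fn misalignment(p: *mut u8, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    p.addr() & (align - 1)
}

/// Bytes to add to reach the next `align` boundary. Note that `align_offset`
/// is documented as being allowed to return `usize::MAX` when it cannot
/// compute the offset, so callers must be prepared for that.
fn distance_to_aligned(p: *mut u8, align: usize) -> usize {
    p.align_offset(align)
}
```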

View file

@ -173,7 +173,7 @@ pub const PROGRESS_CONTINUE: DWORD = 0;
pub const E_NOTIMPL: HRESULT = 0x80004001u32 as HRESULT; pub const E_NOTIMPL: HRESULT = 0x80004001u32 as HRESULT;
pub const INVALID_HANDLE_VALUE: HANDLE = !0 as HANDLE; pub const INVALID_HANDLE_VALUE: HANDLE = ptr::invalid_mut(!0);
pub const FACILITY_NT_BIT: DWORD = 0x1000_0000; pub const FACILITY_NT_BIT: DWORD = 0x1000_0000;
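`ptr::invalid_mut` (added by this series) makes it explicit that a sentinel such as `INVALID_HANDLE_VALUE` is an address with no provenance: it may be stored and compared but never dereferenced. A sketch of defining and testing such a sentinel, with `Handle` as a stand-in type rather than the real Windows `HANDLE`, on a toolchain where the `strict_provenance` feature is enabled:

```rust
use core::ffi::c_void;
use core::ptr;

type Handle = *mut c_void;

// All-ones address, no provenance; dereferencing it would be a bug under any
// provenance model.
const INVALID_HANDLE: Handle = ptr::invalid_mut(!0);

fn is_invalid(h: Handle) -> bool {
    // Compare by address; the sentinel has no provenance to compare.
    h.addr() == INVALID_HANDLE.addr()
}
```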

View file

@ -88,7 +88,7 @@ macro_rules! compat_fn {
let symbol_name: *const u8 = concat!(stringify!($symbol), "\0").as_ptr(); let symbol_name: *const u8 = concat!(stringify!($symbol), "\0").as_ptr();
let module_handle = $crate::sys::c::GetModuleHandleA(module_name as *const i8); let module_handle = $crate::sys::c::GetModuleHandleA(module_name as *const i8);
if !module_handle.is_null() { if !module_handle.is_null() {
match $crate::sys::c::GetProcAddress(module_handle, symbol_name as *const i8) as usize { match $crate::sys::c::GetProcAddress(module_handle, symbol_name as *const i8).addr() {
0 => {} 0 => {}
n => { n => {
PTR = Some(mem::transmute::<usize, F>(n)); PTR = Some(mem::transmute::<usize, F>(n));

View file

@ -57,6 +57,9 @@ pub struct DirEntry {
data: c::WIN32_FIND_DATAW, data: c::WIN32_FIND_DATAW,
} }
unsafe impl Send for OpenOptions {}
unsafe impl Sync for OpenOptions {}
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct OpenOptions { pub struct OpenOptions {
// generic // generic
@ -72,7 +75,7 @@ pub struct OpenOptions {
attributes: c::DWORD, attributes: c::DWORD,
share_mode: c::DWORD, share_mode: c::DWORD,
security_qos_flags: c::DWORD, security_qos_flags: c::DWORD,
security_attributes: usize, // FIXME: should be a reference security_attributes: c::LPSECURITY_ATTRIBUTES,
} }
#[derive(Clone, PartialEq, Eq, Debug)] #[derive(Clone, PartialEq, Eq, Debug)]
@ -187,7 +190,7 @@ impl OpenOptions {
share_mode: c::FILE_SHARE_READ | c::FILE_SHARE_WRITE | c::FILE_SHARE_DELETE, share_mode: c::FILE_SHARE_READ | c::FILE_SHARE_WRITE | c::FILE_SHARE_DELETE,
attributes: 0, attributes: 0,
security_qos_flags: 0, security_qos_flags: 0,
security_attributes: 0, security_attributes: ptr::null_mut(),
} }
} }
@ -228,7 +231,7 @@ impl OpenOptions {
self.security_qos_flags = flags | c::SECURITY_SQOS_PRESENT; self.security_qos_flags = flags | c::SECURITY_SQOS_PRESENT;
} }
pub fn security_attributes(&mut self, attrs: c::LPSECURITY_ATTRIBUTES) { pub fn security_attributes(&mut self, attrs: c::LPSECURITY_ATTRIBUTES) {
self.security_attributes = attrs as usize; self.security_attributes = attrs;
} }
fn get_access_mode(&self) -> io::Result<c::DWORD> { fn get_access_mode(&self) -> io::Result<c::DWORD> {
@ -289,7 +292,7 @@ impl File {
path.as_ptr(), path.as_ptr(),
opts.get_access_mode()?, opts.get_access_mode()?,
opts.share_mode, opts.share_mode,
opts.security_attributes as *mut _, opts.security_attributes,
opts.get_creation_mode()?, opts.get_creation_mode()?,
opts.get_flags_and_attributes(), opts.get_flags_and_attributes(),
ptr::null_mut(), ptr::null_mut(),
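Keeping `security_attributes` as a typed pointer instead of a `usize` is also why this hunk adds explicit `Send`/`Sync` impls: a raw-pointer field opts the type out of those auto traits, a fact the old `usize` field silently papered over. A small sketch of the same trade-off with a made-up options struct (the safety of the `unsafe impl`s is a claim the author has to justify, here as in `OpenOptions`):

```rust
use core::ffi::c_void;
use core::ptr;

struct Options {
    flags: u32,
    // A pointer, not an address laundered through `usize`: readers and tools
    // can see that this field refers to caller-owned memory.
    attributes: *mut c_void,
}

// The raw-pointer field suppresses the auto traits; opting back in is now an
// explicit, reviewable decision rather than an accident of the representation.
unsafe impl Send for Options {}
unsafe impl Sync for Options {}

impl Options {
    fn new() -> Self {
        Options { flags: 0, attributes: ptr::null_mut() }
    }
    fn set_attributes(&mut self, attrs: *mut c_void) {
        self.attributes = attrs;
    }
}
```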

View file

@ -136,7 +136,7 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
($($n:literal,)+) => { ($($n:literal,)+) => {
$( $(
if start[$n] == needle { if start[$n] == needle {
return Some((&start[$n] as *const u16 as usize - ptr as usize) / 2); return Some(((&start[$n] as *const u16).addr() - ptr.addr()) / 2);
} }
)+ )+
} }
@ -149,7 +149,7 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
for c in start { for c in start {
if *c == needle { if *c == needle {
return Some((c as *const u16 as usize - ptr as usize) / 2); return Some(((c as *const u16).addr() - ptr.addr()) / 2);
} }
} }
None None

View file

@ -134,7 +134,7 @@ impl Drop for Env {
pub fn env() -> Env { pub fn env() -> Env {
unsafe { unsafe {
let ch = c::GetEnvironmentStringsW(); let ch = c::GetEnvironmentStringsW();
if ch as usize == 0 { if ch.is_null() {
panic!("failure getting env string from OS: {}", io::Error::last_os_error()); panic!("failure getting env string from OS: {}", io::Error::last_os_error());
} }
Env { base: ch, cur: ch } Env { base: ch, cur: ch }
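`is_null()` states directly what `ch as usize == 0` only implied, and it never leaves pointer land. A trivial sketch of the same check on a pointer returned from a (hypothetical) FFI call:

```rust
use core::ffi::c_void;

/// Turn a nullable pointer returned by some FFI call into a `Result`.
fn check(returned: *mut c_void) -> Result<*mut c_void, &'static str> {
    if returned.is_null() { Err("call failed") } else { Ok(returned) }
}
```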

View file

@ -60,7 +60,7 @@
use crate::convert::TryFrom; use crate::convert::TryFrom;
use crate::ptr; use crate::ptr;
use crate::sync::atomic::{ use crate::sync::atomic::{
AtomicI8, AtomicUsize, AtomicI8, AtomicPtr,
Ordering::{Acquire, Relaxed, Release}, Ordering::{Acquire, Relaxed, Release},
}; };
use crate::sys::{c, dur2timeout}; use crate::sys::{c, dur2timeout};
@ -217,8 +217,8 @@ impl Parker {
} }
fn keyed_event_handle() -> c::HANDLE { fn keyed_event_handle() -> c::HANDLE {
const INVALID: usize = !0; const INVALID: c::HANDLE = ptr::invalid_mut(!0);
static HANDLE: AtomicUsize = AtomicUsize::new(INVALID); static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID);
match HANDLE.load(Relaxed) { match HANDLE.load(Relaxed) {
INVALID => { INVALID => {
let mut handle = c::INVALID_HANDLE_VALUE; let mut handle = c::INVALID_HANDLE_VALUE;
@ -233,7 +233,7 @@ fn keyed_event_handle() -> c::HANDLE {
r => panic!("Unable to create keyed event handle: error {r}"), r => panic!("Unable to create keyed event handle: error {r}"),
} }
} }
match HANDLE.compare_exchange(INVALID, handle as usize, Relaxed, Relaxed) { match HANDLE.compare_exchange(INVALID, handle, Relaxed, Relaxed) {
Ok(_) => handle, Ok(_) => handle,
Err(h) => { Err(h) => {
// Lost the race to another thread initializing HANDLE before we did. // Lost the race to another thread initializing HANDLE before we did.
@ -241,10 +241,10 @@ fn keyed_event_handle() -> c::HANDLE {
unsafe { unsafe {
c::CloseHandle(handle); c::CloseHandle(handle);
} }
h as c::HANDLE h
} }
} }
} }
handle => handle as c::HANDLE, handle => handle,
} }
} }
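The handle cache is the usual racy one-time initialization, just expressed over `AtomicPtr` instead of `AtomicUsize`. A simplified sketch of that shape, with hypothetical `create_handle`/`close_handle` stand-ins (not real APIs) and a null sentinel where the std code uses an all-ones invalid pointer:

```rust
use core::ffi::c_void;
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering::Relaxed};

// Stand-ins for the real create/close pair; here they just box a byte.
fn create_handle() -> *mut c_void {
    Box::into_raw(Box::new(0u8)) as *mut c_void
}
unsafe fn close_handle(h: *mut c_void) {
    drop(unsafe { Box::from_raw(h as *mut u8) });
}

static HANDLE: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());

/// Several threads may race to create a handle; exactly one wins the
/// compare_exchange, and the losers close theirs and use the winner's.
fn handle() -> *mut c_void {
    let current = HANDLE.load(Relaxed);
    if !current.is_null() {
        return current;
    }
    let new = create_handle();
    match HANDLE.compare_exchange(ptr::null_mut(), new, Relaxed, Relaxed) {
        Ok(_) => new,
        Err(winner) => {
            // Lost the race: free ours, use the already-published handle.
            unsafe { close_handle(new) };
            winner
        }
    }
}
```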

View file

@ -1,4 +1,5 @@
use crate::sync::atomic::{AtomicUsize, Ordering}; use crate::ptr;
use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sys::locks as imp; use crate::sys::locks as imp;
use crate::sys_common::mutex::MovableMutex; use crate::sys_common::mutex::MovableMutex;
@ -13,17 +14,18 @@ impl CondvarCheck for Box<imp::Mutex> {
} }
pub struct SameMutexCheck { pub struct SameMutexCheck {
addr: AtomicUsize, addr: AtomicPtr<()>,
} }
#[allow(dead_code)] #[allow(dead_code)]
impl SameMutexCheck { impl SameMutexCheck {
pub const fn new() -> Self { pub const fn new() -> Self {
Self { addr: AtomicUsize::new(0) } Self { addr: AtomicPtr::new(ptr::null_mut()) }
} }
pub fn verify(&self, mutex: &MovableMutex) { pub fn verify(&self, mutex: &MovableMutex) {
let addr = mutex.raw() as *const imp::Mutex as usize; let addr = mutex.raw() as *const imp::Mutex as *const () as *mut _;
match self.addr.compare_exchange(0, addr, Ordering::SeqCst, Ordering::SeqCst) { match self.addr.compare_exchange(ptr::null_mut(), addr, Ordering::SeqCst, Ordering::SeqCst)
{
Ok(_) => {} // Stored the address Ok(_) => {} // Stored the address
Err(n) if n == addr => {} // Lost a race to store the same address Err(n) if n == addr => {} // Lost a race to store the same address
_ => panic!("attempted to use a condition variable with two mutexes"), _ => panic!("attempted to use a condition variable with two mutexes"),
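Storing the mutex address as an `AtomicPtr<()>` keeps the identity check in pointer terms end to end. A generic sketch of the same shape (simplified, not the std type):

```rust
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

/// Remembers the first object it is used with and panics on any other.
struct IdentityCheck {
    addr: AtomicPtr<()>,
}

impl IdentityCheck {
    const fn new() -> Self {
        IdentityCheck { addr: AtomicPtr::new(ptr::null_mut()) }
    }

    fn verify<T>(&self, object: &T) {
        // The address is stored as a pointer, so the atomic's contents are
        // still recognizably an address rather than an arbitrary integer.
        let p = object as *const T as *const () as *mut ();
        match self.addr.compare_exchange(ptr::null_mut(), p, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(_) => {}                  // first use: remember it
            Err(prev) if prev == p => {} // same object again: fine
            _ => panic!("used with two different objects"),
        }
    }
}
```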

View file

@ -1071,7 +1071,7 @@ pub mod os {
pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> { pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
// SAFETY: See the documentation for this method. // SAFETY: See the documentation for this method.
let ptr = unsafe { self.os.get() as *mut Value<T> }; let ptr = unsafe { self.os.get() as *mut Value<T> };
if ptr as usize > 1 { if ptr.addr() > 1 {
// SAFETY: the check ensured the pointer is safe (its destructor // SAFETY: the check ensured the pointer is safe (its destructor
// is not running) + it is coming from a trusted source (self). // is not running) + it is coming from a trusted source (self).
if let Some(ref value) = unsafe { (*ptr).inner.get() } { if let Some(ref value) = unsafe { (*ptr).inner.get() } {
@ -1090,7 +1090,7 @@ pub mod os {
// SAFETY: No mutable references are ever handed out meaning getting // SAFETY: No mutable references are ever handed out meaning getting
// the value is ok. // the value is ok.
let ptr = unsafe { self.os.get() as *mut Value<T> }; let ptr = unsafe { self.os.get() as *mut Value<T> };
if ptr as usize == 1 { if ptr.addr() == 1 {
// destructor is running // destructor is running
return None; return None;
} }
@ -1130,7 +1130,7 @@ pub mod os {
unsafe { unsafe {
let ptr = Box::from_raw(ptr as *mut Value<T>); let ptr = Box::from_raw(ptr as *mut Value<T>);
let key = ptr.key; let key = ptr.key;
key.os.set(1 as *mut u8); key.os.set(ptr::invalid_mut(1));
drop(ptr); drop(ptr);
key.os.set(ptr::null_mut()); key.os.set(ptr::null_mut());
} }
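The thread-local key above distinguishes three cases purely by address: null for "no value", address 1 for "destructor currently running", and anything else for a live Box-derived pointer. A sketch of that encoding with the sentinel built by `ptr::invalid_mut` instead of `1 as *mut u8` (feature-gated at this point, and not the std code itself):

```rust
use core::ptr;

const DTOR_RUNNING_ADDR: usize = 1;

// The sentinel carries no provenance and is never dereferenced.
fn dtor_running_sentinel() -> *mut u8 {
    ptr::invalid_mut(DTOR_RUNNING_ADDR)
}

fn classify(p: *mut u8) -> &'static str {
    if p.is_null() {
        "empty"
    } else if p.addr() == DTOR_RUNNING_ADDR {
        "destructor running"
    } else {
        "live value"
    }
}
```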