
Make align_of behave like min_align_of.

This removes a footgun, since it is reasonable to assume that pointers
to `T` are aligned to `align_of::<T>()`. This also matches the
behaviour of C/C++. `min_align_of` is now deprecated.

Closes #21611.
Huon Wilson 2015-05-20 19:18:03 +10:00
parent 2ad26e850e
commit 225b116829
8 changed files with 47 additions and 53 deletions
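
For context, a minimal sketch (not part of the diff) of the assumption the message is about:
callers routinely take `align_of::<T>()` to be the alignment that allocation and struct layout
actually use for `T`, which is only sound once `align_of` reports the ABI-required minimum
alignment rather than the preferred one.

```rust
use std::mem;

fn check<T>() {
    // The ABI-required alignment that layout and the allocator use for `T`
    // (deprecated by this commit, but still callable).
    let abi_align = mem::min_align_of::<T>();
    // What callers commonly assume about any valid `*const T`.
    let assumed = mem::align_of::<T>();
    // After this commit the two always agree, so assuming that pointers to `T`
    // are aligned to `align_of::<T>()` is no longer a footgun.
    assert_eq!(abi_align, assumed);
}

fn main() {
    check::<u64>();
    check::<(u8, u32)>();
}
```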

View file

@@ -77,7 +77,7 @@ use core::atomic;
 use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
 use core::fmt;
 use core::cmp::Ordering;
-use core::mem::{min_align_of_val, size_of_val};
+use core::mem::{align_of_val, size_of_val};
 use core::intrinsics::drop_in_place;
 use core::mem;
 use core::nonzero::NonZero;
@@ -241,7 +241,7 @@ impl<T: ?Sized> Arc<T> {
         if self.inner().weak.fetch_sub(1, Release) == 1 {
             atomic::fence(Acquire);
-            deallocate(ptr as *mut u8, size_of_val(&*ptr), min_align_of_val(&*ptr))
+            deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
         }
     }
 }
@@ -565,7 +565,7 @@ impl<T: ?Sized> Drop for Weak<T> {
             atomic::fence(Acquire);
             unsafe { deallocate(ptr as *mut u8,
                                 size_of_val(&*ptr),
-                                min_align_of_val(&*ptr)) }
+                                align_of_val(&*ptr)) }
         }
     }
 }

View file

@@ -162,7 +162,7 @@ use core::fmt;
 use core::hash::{Hasher, Hash};
 use core::intrinsics::{assume, drop_in_place};
 use core::marker::{self, Unsize};
-use core::mem::{self, min_align_of, size_of, min_align_of_val, size_of_val, forget};
+use core::mem::{self, align_of, size_of, align_of_val, size_of_val, forget};
 use core::nonzero::NonZero;
 use core::ops::{CoerceUnsized, Deref};
 use core::ptr;
@@ -246,7 +246,7 @@ impl<T> Rc<T> {
                 // destruct the box and skip our Drop
                 // we can ignore the refcounts because we know we're unique
                 deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(),
-                           min_align_of::<RcBox<T>>());
+                           align_of::<RcBox<T>>());
                 forget(rc);
                 Ok(val)
             }
@@ -496,7 +496,7 @@ impl<T: ?Sized> Drop for Rc<T> {
                     if self.weak() == 0 {
                         deallocate(ptr as *mut u8,
                                    size_of_val(&*ptr),
-                                   min_align_of_val(&*ptr))
+                                   align_of_val(&*ptr))
                     }
                 }
             }
@@ -805,7 +805,7 @@ impl<T: ?Sized> Drop for Weak<T> {
                 // the strong pointers have disappeared.
                 if self.weak() == 0 {
                     deallocate(ptr as *mut u8, size_of_val(&*ptr),
-                               min_align_of_val(&*ptr))
+                               align_of_val(&*ptr))
                 }
             }
         }

View file

@@ -244,7 +244,7 @@ impl<'longer_than_self> Arena<'longer_than_self> {
     fn alloc_copy<T, F>(&self, op: F) -> &mut T where F: FnOnce() -> T {
         unsafe {
             let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
-                                            mem::min_align_of::<T>());
+                                            mem::align_of::<T>());
             let ptr = ptr as *mut T;
             ptr::write(&mut (*ptr), op());
             return &mut *ptr;
@@ -300,7 +300,7 @@ impl<'longer_than_self> Arena<'longer_than_self> {
             let tydesc = get_tydesc::<T>();
             let (ty_ptr, ptr) =
                 self.alloc_noncopy_inner(mem::size_of::<T>(),
-                                         mem::min_align_of::<T>());
+                                         mem::align_of::<T>());
             let ty_ptr = ty_ptr as *mut usize;
             let ptr = ptr as *mut T;
             // Write in our tydesc along with a bit indicating that it
@@ -393,7 +393,7 @@ struct TypedArenaChunk<T> {
 fn calculate_size<T>(capacity: usize) -> usize {
     let mut size = mem::size_of::<TypedArenaChunk<T>>();
-    size = round_up(size, mem::min_align_of::<T>());
+    size = round_up(size, mem::align_of::<T>());
     let elem_size = mem::size_of::<T>();
     let elems_size = elem_size.checked_mul(capacity).unwrap();
     size = size.checked_add(elems_size).unwrap();
@@ -405,7 +405,7 @@ impl<T> TypedArenaChunk<T> {
     unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: usize)
                   -> *mut TypedArenaChunk<T> {
         let size = calculate_size::<T>(capacity);
-        let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
+        let chunk = allocate(size, mem::align_of::<TypedArenaChunk<T>>())
                         as *mut TypedArenaChunk<T>;
         if chunk.is_null() { alloc::oom() }
         (*chunk).next = next;
@@ -431,7 +431,7 @@ impl<T> TypedArenaChunk<T> {
         let size = calculate_size::<T>(self.capacity);
         let self_ptr: *mut TypedArenaChunk<T> = self;
         deallocate(self_ptr as *mut u8, size,
-                   mem::min_align_of::<TypedArenaChunk<T>>());
+                   mem::align_of::<TypedArenaChunk<T>>());
         if !next.is_null() {
             let capacity = (*next).capacity;
             (*next).destroy(capacity);
@@ -444,7 +444,7 @@ impl<T> TypedArenaChunk<T> {
         let this: *const TypedArenaChunk<T> = self;
         unsafe {
             mem::transmute(round_up(this.offset(1) as usize,
-                                    mem::min_align_of::<T>()))
+                                    mem::align_of::<T>()))
         }
     }

View file

@@ -163,12 +163,12 @@ fn test_offset_calculation() {
 }
 
 fn calculate_allocation_generic<K, V>(capacity: usize, is_leaf: bool) -> (usize, usize) {
-    let (keys_size, keys_align) = (capacity * mem::size_of::<K>(), mem::min_align_of::<K>());
-    let (vals_size, vals_align) = (capacity * mem::size_of::<V>(), mem::min_align_of::<V>());
+    let (keys_size, keys_align) = (capacity * mem::size_of::<K>(), mem::align_of::<K>());
+    let (vals_size, vals_align) = (capacity * mem::size_of::<V>(), mem::align_of::<V>());
     let (edges_size, edges_align) = if is_leaf {
         (0, 1)
     } else {
-        ((capacity + 1) * mem::size_of::<Node<K, V>>(), mem::min_align_of::<Node<K, V>>())
+        ((capacity + 1) * mem::size_of::<Node<K, V>>(), mem::align_of::<Node<K, V>>())
     };
 
     calculate_allocation(
@@ -181,11 +181,11 @@ fn calculate_allocation_generic<K, V>(capacity: usize, is_leaf: bool) -> (usize,
 fn calculate_offsets_generic<K, V>(capacity: usize, is_leaf: bool) -> (usize, usize) {
     let keys_size = capacity * mem::size_of::<K>();
     let vals_size = capacity * mem::size_of::<V>();
-    let vals_align = mem::min_align_of::<V>();
+    let vals_align = mem::align_of::<V>();
     let edges_align = if is_leaf {
         1
     } else {
-        mem::min_align_of::<Node<K, V>>()
+        mem::align_of::<Node<K, V>>()
     };
 
     calculate_offsets(

View file

@@ -219,7 +219,7 @@ impl<T> Vec<T> {
         } else {
             let size = capacity.checked_mul(mem::size_of::<T>())
                                .expect("capacity overflow");
-            let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
+            let ptr = unsafe { allocate(size, mem::align_of::<T>()) };
             if ptr.is_null() { ::alloc::oom() }
             unsafe { Vec::from_raw_parts(ptr as *mut T, 0, capacity) }
         }
@@ -393,7 +393,7 @@ impl<T> Vec<T> {
                 let ptr = reallocate(*self.ptr as *mut u8,
                                      self.cap * mem::size_of::<T>(),
                                      self.len * mem::size_of::<T>(),
-                                     mem::min_align_of::<T>()) as *mut T;
+                                     mem::align_of::<T>()) as *mut T;
                 if ptr.is_null() { ::alloc::oom() }
                 self.ptr = Unique::new(ptr);
             }
@@ -866,9 +866,9 @@ impl<T> Vec<T> {
         // FIXME: Assert statically that the types `T` and `U` have the
         // same minimal alignment in case they are not zero-sized.
 
-        // These asserts are necessary because the `min_align_of` of the
+        // These asserts are necessary because the `align_of` of the
         // types are passed to the allocator by `Vec`.
-        assert!(mem::min_align_of::<T>() == mem::min_align_of::<U>());
+        assert!(mem::align_of::<T>() == mem::align_of::<U>());
 
         // This `as isize` cast is safe, because the size of the elements of the
         // vector is not 0, and:
@@ -1269,9 +1269,9 @@ impl<T> Vec<T> {
 #[inline(never)]
 unsafe fn alloc_or_realloc<T>(ptr: *mut T, old_size: usize, size: usize) -> *mut T {
     if old_size == 0 {
-        allocate(size, mem::min_align_of::<T>()) as *mut T
+        allocate(size, mem::align_of::<T>()) as *mut T
     } else {
-        reallocate(ptr as *mut u8, old_size, size, mem::min_align_of::<T>()) as *mut T
+        reallocate(ptr as *mut u8, old_size, size, mem::align_of::<T>()) as *mut T
     }
 }
@@ -1280,7 +1280,7 @@ unsafe fn dealloc<T>(ptr: *mut T, len: usize) {
     if mem::size_of::<T>() != 0 {
         deallocate(ptr as *mut u8,
                    len * mem::size_of::<T>(),
-                   mem::min_align_of::<T>())
+                   mem::align_of::<T>())
     }
 }

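The `Vec` hunk above keeps the equal-alignment assertion for the in-place conversion between
element types; here is a hedged illustration (the helper name is ours, not from the diff) of
why the alignments handed to the allocator must match:

```rust
use std::mem;

// A buffer allocated with `T`'s alignment is later reallocated or freed using
// `U`'s alignment, so the allocator only sees a consistent description of the
// block if the two alignments are equal.
fn alignments_compatible<T, U>() -> bool {
    mem::align_of::<T>() == mem::align_of::<U>()
}

fn main() {
    assert!(alignments_compatible::<u32, i32>());  // same alignment: reuse is fine
    assert!(!alignments_compatible::<u8, u64>());  // different: the assert would trip
}
```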
View file

@@ -67,7 +67,7 @@ impl<T> Drop for VecDeque<T> {
             if mem::size_of::<T>() != 0 {
                 heap::deallocate(*self.ptr as *mut u8,
                                  self.cap * mem::size_of::<T>(),
-                                 mem::min_align_of::<T>())
+                                 mem::align_of::<T>())
             }
         }
     }
@@ -172,7 +172,7 @@ impl<T> VecDeque<T> {
         let ptr = unsafe {
             if mem::size_of::<T>() != 0 {
-                let ptr = heap::allocate(size, mem::min_align_of::<T>()) as *mut T;;
+                let ptr = heap::allocate(size, mem::align_of::<T>()) as *mut T;;
                 if ptr.is_null() { ::alloc::oom() }
                 Unique::new(ptr)
             } else {
@@ -340,7 +340,7 @@ impl<T> VecDeque<T> {
                 let ptr = heap::reallocate(*self.ptr as *mut u8,
                                            old,
                                            new,
-                                           mem::min_align_of::<T>()) as *mut T;
+                                           mem::align_of::<T>()) as *mut T;
                 if ptr.is_null() { ::alloc::oom() }
                 self.ptr = Unique::new(ptr);
             }
@@ -460,7 +460,7 @@ impl<T> VecDeque<T> {
                 let ptr = heap::reallocate(*self.ptr as *mut u8,
                                            old,
                                            new_size,
-                                           mem::min_align_of::<T>()) as *mut T;
+                                           mem::align_of::<T>()) as *mut T;
                 if ptr.is_null() { ::alloc::oom() }
                 self.ptr = Unique::new(ptr);
             }

View file

@@ -155,6 +155,7 @@ pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(reason = "use `align_of` instead", since = "1.1.0")]
 pub fn min_align_of<T>() -> usize {
     unsafe { intrinsics::min_align_of::<T>() }
 }
@@ -170,14 +171,14 @@ pub fn min_align_of<T>() -> usize {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
+#[deprecated(reason = "use `align_of_val` instead", since = "1.1.0")]
 pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
     unsafe { intrinsics::min_align_of_val(val) }
 }
 
 /// Returns the alignment in memory for a type.
 ///
-/// This function will return the alignment, in bytes, of a type in memory. If the alignment
-/// returned is adhered to, then the type is guaranteed to function properly.
+/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
 ///
 /// # Examples
 ///
@@ -189,17 +190,10 @@ pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn align_of<T>() -> usize {
-    // We use the preferred alignment as the default alignment for a type. This
-    // appears to be what clang migrated towards as well:
-    //
-    // http://lists.cs.uiuc.edu/pipermail/cfe-commits/Week-of-Mon-20110725/044411.html
-    unsafe { intrinsics::pref_align_of::<T>() }
+    unsafe { intrinsics::min_align_of::<T>() }
 }
 
-/// Returns the alignment of the type of the value that `_val` points to.
-///
-/// This is similar to `align_of`, but function will properly handle types such as trait objects
-/// (in the future), returning the alignment for an arbitrary value at runtime.
+/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
 ///
 /// # Examples
 ///
@@ -210,8 +204,8 @@ pub fn align_of<T>() -> usize {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub fn align_of_val<T>(_val: &T) -> usize {
-    align_of::<T>()
+pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
+    unsafe { intrinsics::min_align_of_val(val) }
 }
 
 /// Creates a value initialized to zero.

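The `mem` hunk above is the heart of the change: `align_of` now goes through the
`min_align_of` intrinsic instead of `pref_align_of`, and `align_of_val` accepts
`T: ?Sized` so it can report the alignment of unsized values. A small sketch of
what the relaxed bound allows (purely illustrative, not part of the diff):

```rust
use std::mem;

fn main() {
    let xs = [1u64, 2, 3, 4];
    let slice: &[u64] = &xs;
    // With the `?Sized` bound, alignment can be queried for a dynamically
    // sized value; it matches the element type's ABI-required alignment.
    assert_eq!(mem::align_of_val(slice), mem::align_of::<u64>());
}
```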
View file

@@ -15,7 +15,7 @@ use cmp;
 use hash::{Hash, Hasher};
 use iter::{Iterator, ExactSizeIterator};
 use marker::{Copy, Send, Sync, Sized, self};
-use mem::{min_align_of, size_of};
+use mem::{align_of, size_of};
 use mem;
 use num::wrapping::OverflowingOps;
 use ops::{Deref, DerefMut, Drop};
@@ -553,9 +553,9 @@ fn calculate_allocation(hash_size: usize, hash_align: usize,
                                                 vals_align);
     let (end_of_vals, oflo2) = vals_offset.overflowing_add(vals_size);
 
-    let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
+    let align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
 
-    (min_align, hash_offset, end_of_vals, oflo || oflo2)
+    (align, hash_offset, end_of_vals, oflo || oflo2)
 }
 
 #[test]
@@ -597,9 +597,9 @@ impl<K, V> RawTable<K, V> {
         // factored out into a different function.
         let (malloc_alignment, hash_offset, size, oflo) =
             calculate_allocation(
-                hashes_size, min_align_of::<u64>(),
-                keys_size,   min_align_of::< K >(),
-                vals_size,   min_align_of::< V >());
+                hashes_size, align_of::<u64>(),
+                keys_size,   align_of::< K >(),
+                vals_size,   align_of::< V >());
 
         assert!(!oflo, "capacity overflow");
@@ -630,8 +630,8 @@ impl<K, V> RawTable<K, V> {
         let buffer = *self.hashes as *mut u8;
         let (keys_offset, vals_offset, oflo) =
             calculate_offsets(hashes_size,
-                              keys_size, min_align_of::<K>(),
-                              min_align_of::<V>());
+                              keys_size, align_of::<K>(),
+                              align_of::<V>());
         debug_assert!(!oflo, "capacity overflow");
         unsafe {
             RawBucket {
@@ -1005,9 +1005,9 @@ impl<K, V> Drop for RawTable<K, V> {
         let keys_size = self.capacity * size_of::<K>();
         let vals_size = self.capacity * size_of::<V>();
         let (align, _, size, oflo) =
-            calculate_allocation(hashes_size, min_align_of::<u64>(),
-                                 keys_size, min_align_of::<K>(),
-                                 vals_size, min_align_of::<V>());
+            calculate_allocation(hashes_size, align_of::<u64>(),
+                                 keys_size, align_of::<K>(),
+                                 vals_size, align_of::<V>());
 
         debug_assert!(!oflo, "should be impossible");