
Auto merge of #29026 - steveklabnik:rollup, r=steveklabnik

- Successful merges: #28988, #28989, #28990, #28997, #29007, #29015
- Failed merges: #28906
bors 2015-10-14 00:06:32 +00:00
commit c0dc2cb81d
7 changed files with 124 additions and 114 deletions

View file

@@ -1898,7 +1898,7 @@ for pop in search(&data_file, &city) {
 
 In this piece of code, we take `file` (which has the type
 `Option<String>`), and convert it to a type that `search` can use, in
-this case, `&Option<AsRef<Path>>`. Do do this, we take a reference of
+this case, `&Option<AsRef<Path>>`. To do this, we take a reference of
 file, and map `Path::new` onto it. In this case, `as_ref()` converts
 the `Option<String>` into an `Option<&str>`, and from there, we can
 execute `Path::new` to the content of the optional, and return the
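For reference, the conversion this paragraph describes looks like the following when pulled out on its own. This is a minimal sketch; the `search` signature and file name are hypothetical stand-ins for the book's own definitions:

    use std::path::Path;

    // Hypothetical stand-in for the book's `search` function.
    fn search(file: &Option<&Path>, city: &str) {
        println!("searching {:?} for {}", file, city);
    }

    fn main() {
        let file: Option<String> = Some("data/populations.csv".to_string());
        let city = "Munich".to_string();
        // `as_ref()` gives `Option<&String>`; mapping `Path::new` over it
        // yields `Option<&Path>` without taking ownership of `file`.
        let data_file = file.as_ref().map(Path::new);
        search(&data_file, &city);
    }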

View file

@@ -44,7 +44,7 @@ own iterator involves implementing the `Iterator` trait. While doing that is
 outside of the scope of this guide, Rust provides a number of useful iterators
 to accomplish various tasks. But first, a few notes about limitations of ranges.
 
-Ranges are very primitive, and we often can use better alternatives. Consider
+Ranges are very primitive, and we often can use better alternatives. Consider the
 following Rust anti-pattern: using ranges to emulate a C-style `for` loop. Let's
 suppose you needed to iterate over the contents of a vector. You may be tempted
 to write this:
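Spelled out, the anti-pattern the passage warns about, next to the idiomatic alternative:

    fn main() {
        let nums = vec![1, 2, 3];

        // C-style anti-pattern: drive the loop with an index range.
        for i in 0..nums.len() {
            println!("{}", nums[i]);
        }

        // Idiomatic alternative: iterate over the elements directly,
        // avoiding the indexing and its bounds checks entirely.
        for num in &nums {
            println!("{}", num);
        }
    }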

View file

@@ -1,6 +1,6 @@
 % Lifetimes
 
-This guide is one of three presenting Rust's ownership system. This is one of
+This guide is three of three presenting Rust's ownership system. This is one of
 Rust's most unique and compelling features, with which Rust developers should
 become quite acquainted. Ownership is how Rust achieves its largest goal,
 memory safety. There are a few distinct concepts, each with its own chapter:

View file

@@ -1,6 +1,6 @@
 % References and Borrowing
 
-This guide is one of three presenting Rust's ownership system. This is one of
+This guide is two of three presenting Rust's ownership system. This is one of
 Rust's most unique and compelling features, with which Rust developers should
 become quite acquainted. Ownership is how Rust achieves its largest goal,
 memory safety. There are a few distinct concepts, each with its own

View file

@@ -43,8 +43,7 @@ use libc::{c_int, c_void, size_t};
 extern {
     fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void;
     fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
-    fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t,
-                  flags: c_int) -> size_t;
+    fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
     fn je_sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
     fn je_nallocx(size: size_t, flags: c_int) -> size_t;
 }
@@ -63,40 +62,52 @@ const MIN_ALIGN: usize = 8;
 const MIN_ALIGN: usize = 16;
 
 // MALLOCX_ALIGN(a) macro
-fn mallocx_align(a: usize) -> c_int { a.trailing_zeros() as c_int }
+fn mallocx_align(a: usize) -> c_int {
+    a.trailing_zeros() as c_int
+}
 
 fn align_to_flags(align: usize) -> c_int {
-    if align <= MIN_ALIGN { 0 } else { mallocx_align(align) }
+    if align <= MIN_ALIGN {
+        0
+    } else {
+        mallocx_align(align)
+    }
 }
 
 #[no_mangle]
-pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
+pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
     let flags = align_to_flags(align);
     unsafe { je_mallocx(size as size_t, flags) as *mut u8 }
 }
 
 #[no_mangle]
-pub extern fn __rust_reallocate(ptr: *mut u8, _old_size: usize, size: usize,
-                                align: usize) -> *mut u8 {
+pub extern "C" fn __rust_reallocate(ptr: *mut u8,
                                    _old_size: usize,
                                    size: usize,
                                    align: usize)
                                    -> *mut u8 {
     let flags = align_to_flags(align);
     unsafe { je_rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 }
 }
 
 #[no_mangle]
-pub extern fn __rust_reallocate_inplace(ptr: *mut u8, _old_size: usize,
-                                        size: usize, align: usize) -> usize {
+pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8,
                                            _old_size: usize,
                                            size: usize,
                                            align: usize)
                                            -> usize {
     let flags = align_to_flags(align);
     unsafe { je_xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize }
 }
 
 #[no_mangle]
-pub extern fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
+pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
     let flags = align_to_flags(align);
     unsafe { je_sdallocx(ptr as *mut c_void, old_size as size_t, flags) }
 }
 
 #[no_mangle]
-pub extern fn __rust_usable_size(size: usize, align: usize) -> usize {
+pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize {
     let flags = align_to_flags(align);
     unsafe { je_nallocx(size as size_t, flags) as usize }
 }
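Two things change in this file: `extern fn` becomes the equivalent but explicit `extern "C" fn`, and one-line bodies are expanded. The flag math in `mallocx_align` is worth spelling out: jemalloc's `MALLOCX_ALIGN(a)` macro encodes the base-2 log of the alignment, and for a power of two that is exactly the count of trailing zero bits. A standalone sketch, using `i32` in place of `libc::c_int` and hard-coding `MIN_ALIGN = 16` (one of the two platform values above) so it runs without the `libc` crate:

    // For a power-of-two alignment, trailing_zeros() == log2(align),
    // the value jemalloc's MALLOCX_ALIGN(a) macro encodes in the flags.
    fn mallocx_align(a: usize) -> i32 {
        a.trailing_zeros() as i32
    }

    fn align_to_flags(align: usize) -> i32 {
        // Alignments at or below the platform minimum need no flag at all.
        if align <= 16 { 0 } else { mallocx_align(align) }
    }

    fn main() {
        assert_eq!(mallocx_align(4096), 12); // 4096 == 1 << 12
        assert_eq!(align_to_flags(8), 0);    // covered by MIN_ALIGN
        assert_eq!(align_to_flags(64), 6);
    }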

View file

@@ -39,29 +39,35 @@ const MIN_ALIGN: usize = 8;
 const MIN_ALIGN: usize = 16;
 
 #[no_mangle]
-pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
+pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
     unsafe { imp::allocate(size, align) }
 }
 
 #[no_mangle]
-pub extern fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
+pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
     unsafe { imp::deallocate(ptr, old_size, align) }
 }
 
 #[no_mangle]
-pub extern fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize,
-                                align: usize) -> *mut u8 {
+pub extern "C" fn __rust_reallocate(ptr: *mut u8,
                                    old_size: usize,
                                    size: usize,
                                    align: usize)
                                    -> *mut u8 {
     unsafe { imp::reallocate(ptr, old_size, size, align) }
 }
 
 #[no_mangle]
-pub extern fn __rust_reallocate_inplace(ptr: *mut u8, old_size: usize,
-                                        size: usize, align: usize) -> usize {
+pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8,
                                            old_size: usize,
                                            size: usize,
                                            align: usize)
                                            -> usize {
     unsafe { imp::reallocate_inplace(ptr, old_size, size, align) }
 }
 
 #[no_mangle]
-pub extern fn __rust_usable_size(size: usize, align: usize) -> usize {
+pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize {
     imp::usable_size(size, align)
 }
@@ -80,7 +86,8 @@ mod imp {
         #[cfg(not(target_os = "android"))]
         fn posix_memalign(memptr: *mut *mut libc::c_void,
                           align: libc::size_t,
-                          size: libc::size_t) -> libc::c_int;
+                          size: libc::size_t)
+                          -> libc::c_int;
     }
 
     pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
@@ -94,9 +101,7 @@ mod imp {
     #[cfg(not(target_os = "android"))]
     unsafe fn more_aligned_malloc(size: usize, align: usize) -> *mut u8 {
         let mut out = ptr::null_mut();
-        let ret = posix_memalign(&mut out,
-                                 align as libc::size_t,
-                                 size as libc::size_t);
+        let ret = posix_memalign(&mut out, align as libc::size_t, size as libc::size_t);
         if ret != 0 {
             ptr::null_mut()
         } else {
@@ -107,8 +112,7 @@ mod imp {
         }
     }
 
-    pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize,
-                             align: usize) -> *mut u8 {
+    pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
         if align <= MIN_ALIGN {
             libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8
         } else {
@@ -119,8 +123,11 @@ mod imp {
         }
     }
 
-    pub unsafe fn reallocate_inplace(_ptr: *mut u8, old_size: usize, _size: usize,
-                                     _align: usize) -> usize {
+    pub unsafe fn reallocate_inplace(_ptr: *mut u8,
                                     old_size: usize,
                                     _size: usize,
                                     _align: usize)
                                     -> usize {
         old_size
     }
@@ -141,8 +148,7 @@ mod imp {
     extern "system" {
         fn GetProcessHeap() -> HANDLE;
         fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
-        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID,
-                       dwBytes: SIZE_T) -> LPVOID;
+        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
         fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
     }
@@ -165,32 +171,45 @@ mod imp {
         if align <= MIN_ALIGN {
             HeapAlloc(GetProcessHeap(), 0, size as SIZE_T) as *mut u8
         } else {
-            let ptr = HeapAlloc(GetProcessHeap(), 0,
-                                (size + align) as SIZE_T) as *mut u8;
-            if ptr.is_null() { return ptr }
+            let ptr = HeapAlloc(GetProcessHeap(), 0, (size + align) as SIZE_T) as *mut u8;
+            if ptr.is_null() {
+                return ptr
+            }
             align_ptr(ptr, align)
         }
     }
 
-    pub unsafe fn reallocate(ptr: *mut u8, _old_size: usize, size: usize,
-                             align: usize) -> *mut u8 {
+    pub unsafe fn reallocate(ptr: *mut u8, _old_size: usize, size: usize, align: usize) -> *mut u8 {
         if align <= MIN_ALIGN {
             HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, size as SIZE_T) as *mut u8
         } else {
             let header = get_header(ptr);
-            let new = HeapReAlloc(GetProcessHeap(), 0, header.0 as LPVOID,
-                                  (size + align) as SIZE_T) as *mut u8;
-            if new.is_null() { return new }
+            let new = HeapReAlloc(GetProcessHeap(),
                                  0,
                                  header.0 as LPVOID,
                                  (size + align) as SIZE_T) as *mut u8;
+            if new.is_null() {
+                return new
+            }
             align_ptr(new, align)
         }
     }
 
-    pub unsafe fn reallocate_inplace(ptr: *mut u8, old_size: usize, size: usize,
-                                     align: usize) -> usize {
+    pub unsafe fn reallocate_inplace(ptr: *mut u8,
                                     old_size: usize,
                                     size: usize,
                                     align: usize)
                                     -> usize {
         if align <= MIN_ALIGN {
-            let new = HeapReAlloc(GetProcessHeap(), HEAP_REALLOC_IN_PLACE_ONLY,
-                                  ptr as LPVOID, size as SIZE_T) as *mut u8;
-            if new.is_null() { old_size } else { size }
+            let new = HeapReAlloc(GetProcessHeap(),
                                  HEAP_REALLOC_IN_PLACE_ONLY,
                                  ptr as LPVOID,
                                  size as SIZE_T) as *mut u8;
+            if new.is_null() {
+                old_size
+            } else {
+                size
+            }
         } else {
             old_size
         }
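`align_ptr` and `get_header` are used above but fall outside this hunk. A plausible sketch of the scheme, consistent with how the code calls them: over-allocate by `align`, bump the pointer up to the requested alignment, and stash the pointer `HeapAlloc` actually returned in a one-word `Header` just below the aligned address, so `reallocate` and `deallocate` can recover it. The demo `main` stands in for `HeapAlloc`; treat the details as an illustration, not the file's exact contents:

    // One word stored immediately below the aligned pointer, holding the
    // address the underlying allocator actually returned.
    #[repr(C)]
    struct Header(*mut u8);

    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
        &mut *(ptr as *mut Header).offset(-1)
    }

    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
        // Round up to the next multiple of `align`; over-allocating by
        // `align` guarantees room for both the shift and the header.
        let aligned = ptr.offset((align - (ptr as usize & (align - 1))) as isize);
        *get_header(aligned) = Header(ptr);
        aligned
    }

    fn main() {
        let align = 64;
        // Stand-in for HeapAlloc(GetProcessHeap(), 0, size + align).
        let mut buf = vec![0u8; 128 + align];
        let raw = buf.as_mut_ptr();
        unsafe {
            let aligned = align_ptr(raw, align);
            assert_eq!(aligned as usize % align, 0);
            assert_eq!(get_header(aligned).0, raw); // original pointer recovered
        }
    }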

View file

@@ -105,7 +105,7 @@ pub struct Arena<'longer_than_self> {
     head: RefCell<Chunk>,
     copy_head: RefCell<Chunk>,
     chunks: RefCell<Vec<Chunk>>,
-    _marker: marker::PhantomData<*mut &'longer_than_self()>,
+    _marker: marker::PhantomData<*mut &'longer_than_self ()>,
 }
 
 impl<'a> Arena<'a> {
@@ -197,7 +197,7 @@ fn un_bitpack_tydesc_ptr(p: usize) -> (*const TyDesc, bool) {
 struct TyDesc {
     drop_glue: fn(*const i8),
     size: usize,
-    align: usize
+    align: usize,
 }
 
 trait AllTypes { fn dummy(&self) { } }
@@ -224,8 +224,7 @@ impl<'longer_than_self> Arena<'longer_than_self> {
         let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
         self.chunks.borrow_mut().push(self.copy_head.borrow().clone());
-        *self.copy_head.borrow_mut() =
-            chunk((new_min_chunk_size + 1).next_power_of_two(), true);
+        *self.copy_head.borrow_mut() = chunk((new_min_chunk_size + 1).next_power_of_two(), true);
 
         self.alloc_copy_inner(n_bytes, align)
     }
@@ -242,16 +241,13 @@ impl<'longer_than_self> Arena<'longer_than_self> {
         let copy_head = self.copy_head.borrow();
         copy_head.fill.set(end);
 
-        unsafe {
-            copy_head.as_ptr().offset(start as isize)
-        }
+        unsafe { copy_head.as_ptr().offset(start as isize) }
     }
 
     #[inline]
     fn alloc_copy<T, F>(&self, op: F) -> &mut T where F: FnOnce() -> T {
         unsafe {
-            let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
-                                            mem::align_of::<T>());
+            let ptr = self.alloc_copy_inner(mem::size_of::<T>(), mem::align_of::<T>());
             let ptr = ptr as *mut T;
             ptr::write(&mut (*ptr), op());
             &mut *ptr
@@ -259,21 +255,18 @@ impl<'longer_than_self> Arena<'longer_than_self> {
         }
     }
 
     // Functions for the non-POD part of the arena
-    fn alloc_noncopy_grow(&self, n_bytes: usize,
-                          align: usize) -> (*const u8, *const u8) {
+    fn alloc_noncopy_grow(&self, n_bytes: usize, align: usize) -> (*const u8, *const u8) {
         // Allocate a new chunk.
         let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
         self.chunks.borrow_mut().push(self.head.borrow().clone());
-        *self.head.borrow_mut() =
-            chunk((new_min_chunk_size + 1).next_power_of_two(), false);
+        *self.head.borrow_mut() = chunk((new_min_chunk_size + 1).next_power_of_two(), false);
 
         self.alloc_noncopy_inner(n_bytes, align)
     }
 
     #[inline]
-    fn alloc_noncopy_inner(&self, n_bytes: usize,
-                           align: usize) -> (*const u8, *const u8) {
+    fn alloc_noncopy_inner(&self, n_bytes: usize, align: usize) -> (*const u8, *const u8) {
         // Be careful to not maintain any `head` borrows active, because
         // `alloc_noncopy_grow` borrows it mutably.
         let (start, end, tydesc_start, head_capacity) = {
@@ -297,7 +290,8 @@ impl<'longer_than_self> Arena<'longer_than_self> {
         unsafe {
             let buf = head.as_ptr();
-            (buf.offset(tydesc_start as isize), buf.offset(start as isize))
+            (buf.offset(tydesc_start as isize),
+             buf.offset(start as isize))
         }
     }
@@ -305,16 +299,14 @@ impl<'longer_than_self> Arena<'longer_than_self> {
     fn alloc_noncopy<T, F>(&self, op: F) -> &mut T where F: FnOnce() -> T {
         unsafe {
             let tydesc = get_tydesc::<T>();
-            let (ty_ptr, ptr) =
-                self.alloc_noncopy_inner(mem::size_of::<T>(),
-                                         mem::align_of::<T>());
+            let (ty_ptr, ptr) = self.alloc_noncopy_inner(mem::size_of::<T>(), mem::align_of::<T>());
             let ty_ptr = ty_ptr as *mut usize;
             let ptr = ptr as *mut T;
             // Write in our tydesc along with a bit indicating that it
             // has *not* been initialized yet.
             *ty_ptr = bitpack_tydesc_ptr(tydesc, false);
             // Actually initialize it
-            ptr::write(&mut(*ptr), op());
+            ptr::write(&mut (*ptr), op());
             // Now that we are done, update the tydesc to indicate that
             // the object is there.
             *ty_ptr = bitpack_tydesc_ptr(tydesc, true);
@@ -358,10 +350,10 @@ fn test_arena_destructors_fail() {
     for i in 0..10 {
         // Arena allocate something with drop glue to make sure it
         // doesn't leak.
-        arena.alloc(|| { Rc::new(i) });
+        arena.alloc(|| Rc::new(i));
         // Allocate something with funny size and alignment, to keep
         // things interesting.
-        arena.alloc(|| { [0u8, 1, 2] });
+        arena.alloc(|| [0u8, 1, 2]);
     }
     // Now, panic while allocating
     arena.alloc::<Rc<i32>, _>(|| {
@@ -409,12 +401,13 @@ fn calculate_size<T>(capacity: usize) -> usize {
 impl<T> TypedArenaChunk<T> {
     #[inline]
-    unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: usize)
-                  -> *mut TypedArenaChunk<T> {
+    unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: usize) -> *mut TypedArenaChunk<T> {
         let size = calculate_size::<T>(capacity);
-        let chunk = allocate(size, mem::align_of::<TypedArenaChunk<T>>())
-                        as *mut TypedArenaChunk<T>;
-        if chunk.is_null() { alloc::oom() }
+        let chunk =
+            allocate(size, mem::align_of::<TypedArenaChunk<T>>()) as *mut TypedArenaChunk<T>;
+        if chunk.is_null() {
+            alloc::oom()
+        }
         (*chunk).next = next;
         (*chunk).capacity = capacity;
         chunk
@@ -437,7 +430,8 @@ impl<T> TypedArenaChunk<T> {
         let next = self.next;
         let size = calculate_size::<T>(self.capacity);
         let self_ptr: *mut TypedArenaChunk<T> = self;
-        deallocate(self_ptr as *mut u8, size,
-                   mem::align_of::<TypedArenaChunk<T>>());
+        deallocate(self_ptr as *mut u8,
+                   size,
+                   mem::align_of::<TypedArenaChunk<T>>());
         if !next.is_null() {
             let capacity = (*next).capacity;
@@ -449,9 +443,7 @@ impl<T> TypedArenaChunk<T> {
     #[inline]
     fn start(&self) -> *const u8 {
         let this: *const TypedArenaChunk<T> = self;
-        unsafe {
-            round_up(this.offset(1) as usize, mem::align_of::<T>()) as *const u8
-        }
+        unsafe { round_up(this.offset(1) as usize, mem::align_of::<T>()) as *const u8 }
     }
 
     // Returns a pointer to the end of the allocated space.
@@ -545,14 +537,21 @@ mod tests {
     #[test]
     fn test_arena_alloc_nested() {
-        struct Inner { value: u8 }
-        struct Outer<'a> { inner: &'a Inner }
-        enum EI<'e> { I(Inner), O(Outer<'e>) }
+        struct Inner {
+            value: u8,
+        }
+        struct Outer<'a> {
+            inner: &'a Inner,
+        }
+        enum EI<'e> {
+            I(Inner),
+            O(Outer<'e>),
+        }
 
         struct Wrap<'a>(TypedArena<EI<'a>>);
 
         impl<'a> Wrap<'a> {
-            fn alloc_inner<F:Fn() -> Inner>(&self, f: F) -> &Inner {
+            fn alloc_inner<F: Fn() -> Inner>(&self, f: F) -> &Inner {
                 let r: &EI = self.0.alloc(EI::I(f()));
                 if let &EI::I(ref i) = r {
                     i
@@ -560,7 +559,7 @@ mod tests {
                     panic!("mismatch");
                 }
             }
-            fn alloc_outer<F:Fn() -> Outer<'a>>(&self, f: F) -> &Outer {
+            fn alloc_outer<F: Fn() -> Outer<'a>>(&self, f: F) -> &Outer {
                 let r: &EI = self.0.alloc(EI::O(f()));
                 if let &EI::O(ref o) = r {
                     o
@@ -572,8 +571,9 @@ mod tests {
         let arena = Wrap(TypedArena::new());
 
-        let result = arena.alloc_outer(|| Outer {
-            inner: arena.alloc_inner(|| Inner { value: 10 }) });
+        let result = arena.alloc_outer(|| {
+            Outer { inner: arena.alloc_inner(|| Inner { value: 10 }) }
+        });
 
         assert_eq!(result.inner.value, 10);
     }
@@ -582,49 +582,27 @@ mod tests {
     pub fn test_copy() {
         let arena = TypedArena::new();
         for _ in 0..100000 {
-            arena.alloc(Point {
-                x: 1,
-                y: 2,
-                z: 3,
-            });
+            arena.alloc(Point { x: 1, y: 2, z: 3 });
         }
     }
 
     #[bench]
     pub fn bench_copy(b: &mut Bencher) {
         let arena = TypedArena::new();
-        b.iter(|| {
-            arena.alloc(Point {
-                x: 1,
-                y: 2,
-                z: 3,
-            })
-        })
+        b.iter(|| arena.alloc(Point { x: 1, y: 2, z: 3 }))
     }
 
     #[bench]
     pub fn bench_copy_nonarena(b: &mut Bencher) {
         b.iter(|| {
-            let _: Box<_> = box Point {
-                x: 1,
-                y: 2,
-                z: 3,
-            };
+            let _: Box<_> = box Point { x: 1, y: 2, z: 3 };
         })
     }
 
     #[bench]
     pub fn bench_copy_old_arena(b: &mut Bencher) {
         let arena = Arena::new();
-        b.iter(|| {
-            arena.alloc(|| {
-                Point {
-                    x: 1,
-                    y: 2,
-                    z: 3,
-                }
-            })
-        })
+        b.iter(|| arena.alloc(|| Point { x: 1, y: 2, z: 3 }))
     }
 
     #[allow(dead_code)]
@@ -639,7 +617,7 @@ mod tests {
         for _ in 0..100000 {
             arena.alloc(Noncopy {
                 string: "hello world".to_string(),
-                array: vec!( 1, 2, 3, 4, 5 ),
+                array: vec!(1, 2, 3, 4, 5),
             });
         }
     }
@@ -650,7 +628,7 @@ mod tests {
         b.iter(|| {
             arena.alloc(Noncopy {
                 string: "hello world".to_string(),
-                array: vec!( 1, 2, 3, 4, 5 ),
+                array: vec!(1, 2, 3, 4, 5),
             })
         })
     }
@@ -660,7 +638,7 @@ mod tests {
         b.iter(|| {
             let _: Box<_> = box Noncopy {
                 string: "hello world".to_string(),
-                array: vec!( 1, 2, 3, 4, 5 ),
+                array: vec!(1, 2, 3, 4, 5),
             };
         })
     }
@@ -669,9 +647,11 @@ mod tests {
     pub fn bench_noncopy_old_arena(b: &mut Bencher) {
         let arena = Arena::new();
         b.iter(|| {
-            arena.alloc(|| Noncopy {
-                string: "hello world".to_string(),
-                array: vec!( 1, 2, 3, 4, 5 ),
-            })
+            arena.alloc(|| {
+                Noncopy {
+                    string: "hello world".to_string(),
+                    array: vec!(1, 2, 3, 4, 5),
+                }
+            })
         })
     }
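For readers skimming the test churn: the allocation pattern these tests exercise is simple. A minimal sketch of using `TypedArena` directly (the `arena` crate was nightly-only at the time, hence the feature gate):

    #![feature(rustc_private)]

    extern crate arena;

    use arena::TypedArena;

    struct Point {
        x: i32,
        y: i32,
        z: i32,
    }

    fn main() {
        let arena: TypedArena<Point> = TypedArena::new();
        // `alloc` hands back a reference tied to the arena's lifetime; all
        // allocations are freed together when `arena` is dropped.
        let p = arena.alloc(Point { x: 1, y: 2, z: 3 });
        assert_eq!(p.x + p.y + p.z, 6);
    }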