
Auto merge of #116370 - nnethercote:more-arena-stuff, r=cjgillot

Remove the `TypedArena::alloc_from_iter` specialization.

It was added in #78569. It's complicated and doesn't actually help
performance.

r? `@cjgillot`
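
For context, the specialization being deleted below is a `min_specialization` helper trait: a blanket impl with a `default fn` fallback, plus specialized impls for concrete iterator types. A minimal sketch of that general shape (illustrative only, not the exact rustc code; the names `AllocAll`/`alloc_all` are made up, and it needs a nightly compiler for the feature gate):

// Illustrative sketch of the specialization pattern this commit removes.
// Requires nightly for `min_specialization`; `AllocAll` is a made-up name.
#![feature(min_specialization)]

trait AllocAll<T> {
    fn alloc_all(self) -> Vec<T>;
}

impl<I: IntoIterator<Item = T>, T> AllocAll<T> for I {
    // Fallback path: buffer the iterator first, then hand the buffer over.
    default fn alloc_all(self) -> Vec<T> {
        self.into_iter().collect()
    }
}

impl<T> AllocAll<T> for Vec<T> {
    // Specialized path: the elements are already contiguous, so the
    // intermediate buffering step can be skipped.
    fn alloc_all(self) -> Vec<T> {
        self
    }
}

fn main() {
    assert_eq!((0..3).alloc_all(), vec![0, 1, 2]);
    assert_eq!(vec![4, 5].alloc_all(), vec![4, 5]);
}

The commit's argument is that this machinery buys nothing measurable over always buffering, so the fallback becomes the only path.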
bors 2023-10-04 22:32:46 +00:00
commit afe67fa2ef
6 changed files with 74 additions and 128 deletions

View file

@@ -15,7 +15,6 @@
 #![feature(dropck_eyepatch)]
 #![feature(new_uninit)]
 #![feature(maybe_uninit_slice)]
-#![feature(min_specialization)]
 #![feature(decl_macro)]
 #![feature(pointer_byte_offsets)]
 #![feature(rustc_attrs)]
@@ -44,23 +43,6 @@ fn outline<F: FnOnce() -> R, R>(f: F) -> R {
     f()
 }
 
-/// An arena that can hold objects of only one type.
-pub struct TypedArena<T> {
-    /// A pointer to the next object to be allocated.
-    ptr: Cell<*mut T>,
-
-    /// A pointer to the end of the allocated area. When this pointer is
-    /// reached, a new chunk is allocated.
-    end: Cell<*mut T>,
-
-    /// A vector of arena chunks.
-    chunks: RefCell<Vec<ArenaChunk<T>>>,
-
-    /// Marker indicating that dropping the arena causes its owned
-    /// instances of `T` to be dropped.
-    _own: PhantomData<T>,
-}
-
 struct ArenaChunk<T = u8> {
     /// The raw storage for the arena chunk.
     storage: NonNull<[MaybeUninit<T>]>,
@@ -130,6 +112,23 @@ impl<T> ArenaChunk<T> {
 const PAGE: usize = 4096;
 const HUGE_PAGE: usize = 2 * 1024 * 1024;
 
+/// An arena that can hold objects of only one type.
+pub struct TypedArena<T> {
+    /// A pointer to the next object to be allocated.
+    ptr: Cell<*mut T>,
+
+    /// A pointer to the end of the allocated area. When this pointer is
+    /// reached, a new chunk is allocated.
+    end: Cell<*mut T>,
+
+    /// A vector of arena chunks.
+    chunks: RefCell<Vec<ArenaChunk<T>>>,
+
+    /// Marker indicating that dropping the arena causes its owned
+    /// instances of `T` to be dropped.
+    _own: PhantomData<T>,
+}
+
 impl<T> Default for TypedArena<T> {
     /// Creates a new `TypedArena`.
     fn default() -> TypedArena<T> {
@@ -144,77 +143,6 @@ impl<T> Default for TypedArena<T> {
     }
 }
 
-trait IterExt<T> {
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
-}
-
-impl<I, T> IterExt<T> for I
-where
-    I: IntoIterator<Item = T>,
-{
-    // This default collects into a `SmallVec` and then allocates by copying
-    // from it. The specializations below for types like `Vec` are more
-    // efficient, copying directly without the intermediate collecting step.
-    // This default could be made more efficient, like
-    // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
-    #[inline]
-    default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let vec: SmallVec<[_; 8]> = self.into_iter().collect();
-        vec.alloc_from_iter(arena)
-    }
-}
-
-impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
-    #[inline]
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            mem::forget(self);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<T> IterExt<T> for Vec<T> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
 impl<T> TypedArena<T> {
     /// Allocates an object in the `TypedArena`, returning a reference to it.
     #[inline]
@@ -270,8 +198,35 @@ impl<T> TypedArena<T> {
     #[inline]
     pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+        // This implementation is entirely separate to
+        // `DroplessArena::alloc_from_iter`, even though conceptually they
+        // are the same.
+        //
+        // `DroplessArena` (in the fast case) writes elements from the
+        // iterator one at a time into the allocated memory. That's easy
+        // because the elements don't implement `Drop`. But for `TypedArena`
+        // they do implement `Drop`, which means that if the iterator panics we
+        // could end up with some allocated-but-uninitialized elements, which
+        // will then cause UB in `TypedArena::drop`.
+        //
+        // Instead we use an approach where any iterator panic will occur
+        // before the memory is allocated. This function is much less hot than
+        // `DroplessArena::alloc_from_iter`, so it doesn't need to be
+        // hyper-optimized.
         assert!(mem::size_of::<T>() != 0);
-        iter.alloc_from_iter(self)
+
+        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
+        if vec.is_empty() {
+            return &mut [];
+        }
+        // Move the content to the arena by copying and then forgetting it.
+        let len = vec.len();
+        let start_ptr = self.alloc_raw_slice(len);
+        unsafe {
+            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+            vec.set_len(0);
+            slice::from_raw_parts_mut(start_ptr, len)
+        }
     }
 
     /// Grows the arena.
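
The comment in the new implementation is the heart of the change: for a `TypedArena<T>` whose `T` needs dropping, the iterator must be fully driven before any arena memory is reserved. A standalone sketch of the hazard being avoided (toy code, not rustc's; a plain `Vec` stands in for the `SmallVec` buffer):

use std::panic;

fn main() {
    // An iterator over a Drop type that panics partway through -- the worst
    // case `TypedArena::alloc_from_iter` has to survive.
    let iter = (0..10).map(|i| {
        if i == 5 {
            panic!("iterator panicked mid-way");
        }
        i.to_string()
    });

    // The collect step runs *before* any arena slot would be reserved, so a
    // panic unwinds through an ordinary buffer: the five already-produced
    // strings are dropped cleanly, and no allocated-but-uninitialized
    // elements exist anywhere for a later drop to trip over.
    let result = panic::catch_unwind(|| iter.collect::<Vec<String>>());
    assert!(result.is_err());
    println!("panic contained; nothing uninitialized left behind");
}

The inline capacity of 8 in `SmallVec<[_; 8]>` means small slices never touch the heap on the way into the arena; only longer ones pay for a temporary heap buffer.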

View file

@@ -410,15 +410,11 @@ fn expand_format_args<'hir>(
     let format_options = use_format_options.then(|| {
         // Generate:
         //     &[format_spec_0, format_spec_1, format_spec_2]
-        let elements: Vec<_> = fmt
-            .template
-            .iter()
-            .filter_map(|piece| {
-                let FormatArgsPiece::Placeholder(placeholder) = piece else { return None };
-                Some(make_format_spec(ctx, macsp, placeholder, &mut argmap))
-            })
-            .collect();
-        ctx.expr_array_ref(macsp, ctx.arena.alloc_from_iter(elements))
+        let elements = ctx.arena.alloc_from_iter(fmt.template.iter().filter_map(|piece| {
+            let FormatArgsPiece::Placeholder(placeholder) = piece else { return None };
+            Some(make_format_spec(ctx, macsp, placeholder, &mut argmap))
+        }));
+        ctx.expr_array_ref(macsp, elements)
     });
 
     let arguments = fmt.arguments.all_args();
@@ -477,10 +473,8 @@ fn expand_format_args<'hir>(
         //     <core::fmt::Argument>::new_debug(&arg2),
         //     …
         // ]
-        let elements: Vec<_> = arguments
-            .iter()
-            .zip(argmap)
-            .map(|(arg, ((_, ty), placeholder_span))| {
+        let elements = ctx.arena.alloc_from_iter(arguments.iter().zip(argmap).map(
+            |(arg, ((_, ty), placeholder_span))| {
                 let placeholder_span =
                     placeholder_span.unwrap_or(arg.expr.span).with_ctxt(macsp.ctxt());
                 let arg_span = match arg.kind {
@@ -493,9 +487,9 @@ fn expand_format_args<'hir>(
                     hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, arg),
                 ));
                 make_argument(ctx, placeholder_span, ref_arg, ty)
-            })
-            .collect();
-        ctx.expr_array_ref(macsp, ctx.arena.alloc_from_iter(elements))
+            },
+        ));
+        ctx.expr_array_ref(macsp, elements)
     } else {
         // Generate:
         //     &match (&arg0, &arg1, &…) {
@@ -528,19 +522,14 @@ fn expand_format_args<'hir>(
                 make_argument(ctx, placeholder_span, arg, ty)
             },
         ));
-        let elements: Vec<_> = arguments
-            .iter()
-            .map(|arg| {
-                let arg_expr = ctx.lower_expr(&arg.expr);
-                ctx.expr(
-                    arg.expr.span.with_ctxt(macsp.ctxt()),
-                    hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, arg_expr),
-                )
-            })
-            .collect();
-        let args_tuple = ctx
-            .arena
-            .alloc(ctx.expr(macsp, hir::ExprKind::Tup(ctx.arena.alloc_from_iter(elements))));
+        let elements = ctx.arena.alloc_from_iter(arguments.iter().map(|arg| {
+            let arg_expr = ctx.lower_expr(&arg.expr);
+            ctx.expr(
+                arg.expr.span.with_ctxt(macsp.ctxt()),
+                hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, arg_expr),
+            )
+        }));
+        let args_tuple = ctx.arena.alloc(ctx.expr(macsp, hir::ExprKind::Tup(elements)));
        let array = ctx.arena.alloc(ctx.expr(macsp, hir::ExprKind::Array(args)));
        let match_arms = ctx.arena.alloc_from_iter([ctx.arm(args_pat, array)]);
        let match_expr = ctx.arena.alloc(ctx.expr_match(
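
The three hunks above are the same mechanical rewrite: the `collect()` into a temporary `Vec` disappears from the call site because `alloc_from_iter` now does its own buffering. A toy model of the new call shape (assumed names; `alloc_all` stands in for `ctx.arena.alloc_from_iter`):

// Stand-in for `ctx.arena.alloc_from_iter`: any IntoIterator is accepted
// directly, so callers no longer collect into a Vec first.
fn alloc_all<T>(iter: impl IntoIterator<Item = T>) -> Vec<T> {
    iter.into_iter().collect()
}

fn main() {
    let template = ["literal", "{placeholder}", "text", "{other}"];
    // Mirrors the filter_map in expand_format_args: keep only the
    // placeholder pieces and "lower" each one (uppercasing, for the demo).
    let elements = alloc_all(
        template
            .iter()
            .filter_map(|piece| piece.starts_with('{').then(|| piece.to_uppercase())),
    );
    assert_eq!(elements, ["{PLACEHOLDER}", "{OTHER}"]);
}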

View file

@@ -192,5 +192,5 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Variance] {
             }
         }
     }
-    tcx.arena.alloc_from_iter(collector.variances.into_iter())
+    tcx.arena.alloc_from_iter(collector.variances)
 }
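
This hunk, like the `vtable_entries` and `destructure_const` hunks further down, just drops a redundant `.into_iter()`: since `alloc_from_iter` is bounded on `I: IntoIterator<Item = T>`, passing the collection itself is equivalent. A minimal demonstration (toy function, not the rustc API):

fn alloc_from_iter_demo<T, I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
    iter.into_iter().collect()
}

fn main() {
    let variances = vec!["covariant", "invariant"];
    // Passing the Vec directly...
    let a = alloc_from_iter_demo(variances.clone());
    // ...is identical to spelling out `.into_iter()` at the call site.
    let b = alloc_from_iter_demo(variances.into_iter());
    assert_eq!(a, b);
}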

View file

@@ -348,9 +348,10 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Const<'tcx> {
 impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [ty::ValTree<'tcx>] {
     fn decode(decoder: &mut D) -> &'tcx Self {
-        decoder.interner().arena.alloc_from_iter(
-            (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
-        )
+        decoder
+            .interner()
+            .arena
+            .alloc_from_iter((0..decoder.read_usize()).map(|_| Decodable::decode(decoder)))
     }
 }
@@ -368,9 +369,10 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for AdtDef<'tcx> {
 impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [(ty::Clause<'tcx>, Span)] {
     fn decode(decoder: &mut D) -> &'tcx Self {
-        decoder.interner().arena.alloc_from_iter(
-            (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
-        )
+        decoder
+            .interner()
+            .arena
+            .alloc_from_iter((0..decoder.read_usize()).map(|_| Decodable::decode(decoder)))
     }
 }
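
After the change, these length-prefixed decode loops hand their `map` iterator straight to the arena and rely on `alloc_from_iter` to buffer internally before allocating. A self-contained sketch of the same length-prefixed pattern (toy one-byte decoder, not the `TyDecoder` API):

// Toy length-prefixed decoder: a length byte followed by that many elements.
fn read_u8(cursor: &mut std::slice::Iter<'_, u8>) -> u8 {
    *cursor.next().expect("truncated input")
}

fn main() {
    let data = [3u8, 10, 20, 30];
    let mut cursor = data.iter();
    let len = read_u8(&mut cursor) as usize;
    // Same shape as the decode calls above: drive the decoder once per
    // element via a map over 0..len, with no intermediate Vec at the call site.
    let decoded: Vec<u8> = (0..len).map(|_| read_u8(&mut cursor)).collect();
    assert_eq!(decoded, [10, 20, 30]);
}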

View file

@@ -316,7 +316,7 @@ fn vtable_entries<'tcx>(
         dump_vtable_entries(tcx, sp, trait_ref, &entries);
     }
 
-    tcx.arena.alloc_from_iter(entries.into_iter())
+    tcx.arena.alloc_from_iter(entries)
 }
 
 /// Find slot base for trait methods within vtable entries of another trait

View file

@@ -71,7 +71,7 @@ pub(crate) fn destructure_const<'tcx>(
         _ => bug!("cannot destructure constant {:?}", const_),
     };
 
-    let fields = tcx.arena.alloc_from_iter(fields.into_iter());
+    let fields = tcx.arena.alloc_from_iter(fields);
 
     ty::DestructuredConst { variant, fields }
 }