
Rollup merge of #120302 - oli-obk:const_intern_cleanups, r=RalfJung

various const interning cleanups

After #119044 I noticed that some things can be simplified and refactored.

This is also a requirement for https://github.com/rust-lang/rust/pull/116564, as there we'll need to treat the base allocation differently from the others.

r? @RalfJung
Commit 62c2628eba by Guillaume Boisseau, 2024-02-07 18:24:42 +01:00, committed via GitHub.
2 changed files with 61 additions and 63 deletions
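The heart of the cleanup is in the second file below: `intern_shallow` used to drive recursion itself through a caller-supplied callback, and now instead returns an iterator over the nested allocations, so the caller owns the worklist and can treat the base allocation differently (as #116564 will need). A minimal sketch of that API-shape change, in toy types rather than the rustc ones:

    // Before: the callee drives recursion through a caller-supplied closure.
    fn visit_children_with(node: u32, mut on_child: impl FnMut(u32)) {
        for child in [2 * node, 2 * node + 1] {
            if child < 16 {
                on_child(child);
            }
        }
    }

    // After: the callee only reports its children; the caller drives recursion.
    fn children(node: u32) -> impl Iterator<Item = u32> {
        [2 * node, 2 * node + 1].into_iter().filter(|&c| c < 16)
    }

    fn main() {
        // Seed the worklist from the base node, then drain it.
        let mut todo: Vec<u32> = children(1).collect();
        while let Some(node) = todo.pop() {
            todo.extend(children(node));
        }
    }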

compiler/rustc_const_eval/src/const_eval/eval_queries.rs

@@ -1,5 +1,3 @@
-use std::mem;
-
 use either::{Left, Right};
 
 use rustc_hir::def::DefKind;
@@ -24,12 +22,13 @@ use crate::interpret::{
 };
 
 // Returns a pointer to where the result lives
+#[instrument(level = "trace", skip(ecx, body), ret)]
 fn eval_body_using_ecx<'mir, 'tcx>(
     ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
     cid: GlobalId<'tcx>,
     body: &'mir mir::Body<'tcx>,
 ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
-    debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
+    trace!(?ecx.param_env);
     let tcx = *ecx.tcx;
     assert!(
         cid.promoted.is_some()
@@ -75,11 +74,8 @@ fn eval_body_using_ecx<'mir, 'tcx>(
             None => InternKind::Constant,
         }
     };
 
-    let check_alignment = mem::replace(&mut ecx.machine.check_alignment, CheckAlignment::No); // interning doesn't need to respect alignment
     intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
-    ecx.machine.check_alignment = check_alignment;
 
-    debug!("eval_body_using_ecx done: {:?}", ret);
     Ok(ret)
 }
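The last hunk above drops a save/restore of the machine's alignment-check flag around interning, a simplification the PR description attributes to the rework in #119044. The removed pattern in miniature, as a sketch with made-up types rather than the rustc code:

    use std::mem;

    struct Machine {
        check_alignment: bool,
    }

    // Temporarily override a machine flag for the duration of `f`,
    // restoring the previous value afterwards. This is the shape of the
    // deleted code, which the commit removes entirely.
    fn without_alignment_checks<R>(m: &mut Machine, f: impl FnOnce(&mut Machine) -> R) -> R {
        let saved = mem::replace(&mut m.check_alignment, false);
        let result = f(m);
        m.check_alignment = saved;
        result
    }

    fn main() {
        let mut m = Machine { check_alignment: true };
        without_alignment_checks(&mut m, |m| assert!(!m.check_alignment));
        assert!(m.check_alignment); // restored
    }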

compiler/rustc_const_eval/src/interpret/intern.rs

@@ -41,13 +41,12 @@ pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine<
 /// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
 /// already mutable (as a sanity check).
 ///
-/// `recursive_alloc` is called for all recursively encountered allocations.
+/// Returns an iterator over all relocations referred to by this allocation.
 fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>(
     ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
     alloc_id: AllocId,
     mutability: Mutability,
-    mut recursive_alloc: impl FnMut(&InterpCx<'mir, 'tcx, M>, CtfeProvenance),
-) -> Result<(), ()> {
+) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, ()> {
     trace!("intern_shallow {:?}", alloc_id);
     // remove allocation
     let Some((_kind, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
@@ -65,14 +64,10 @@ fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>(
             assert_eq!(alloc.mutability, Mutability::Mut);
         }
     }
-    // record child allocations
-    for &(_, prov) in alloc.provenance().ptrs().iter() {
-        recursive_alloc(ecx, prov);
-    }
     // link the alloc id to the actual allocation
     let alloc = ecx.tcx.mk_const_alloc(alloc);
     ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
-    Ok(())
+    Ok(alloc.0.0.provenance().ptrs().iter().map(|&(_, prov)| prov))
 }
 
 /// How a constant value should be interned.
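One subtlety in the new signature: the returned iterator is `+ 'tcx` rather than tied to the `&'rt mut` borrow of `ecx`. That works because the provenance list is read out of the freshly interned allocation, which now lives in `tcx`'s arena for the rest of the session, so callers can keep mutating `ecx` while holding the iterator. The same ownership trick in miniature, with hypothetical stand-in types and `Box::leak` playing the arena's role:

    use std::collections::HashMap;

    type AllocId = u32;

    struct Alloc {
        provenance: Vec<AllocId>,
    }

    struct Memory {
        alloc_map: HashMap<AllocId, Alloc>,
    }

    // Remove the allocation from the machine's local map, move it into
    // long-lived storage, and return an iterator over the allocations it
    // references. The iterator borrows the long-lived copy, not `mem`.
    fn intern_shallow(
        mem: &mut Memory,
        id: AllocId,
    ) -> Result<impl Iterator<Item = AllocId> + 'static, ()> {
        let alloc = mem.alloc_map.remove(&id).ok_or(())?; // dangling pointer otherwise
        let interned: &'static Alloc = Box::leak(Box::new(alloc));
        Ok(interned.provenance.iter().copied())
    }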
@@ -128,12 +123,16 @@ pub fn intern_const_alloc_recursive<
         }
     };
 
-    // Initialize recursive interning.
+    // Intern the base allocation, and initialize todo list for recursive interning.
     let base_alloc_id = ret.ptr().provenance.unwrap().alloc_id();
-    let mut todo = vec![(base_alloc_id, base_mutability)];
+    // First we intern the base allocation, as it requires a different mutability.
+    // This gives us the initial set of nested allocations, which will then all be processed
+    // recursively in the loop below.
+    let mut todo: Vec<_> =
+        intern_shallow(ecx, base_alloc_id, base_mutability).unwrap().map(|prov| prov).collect();
     // We need to distinguish "has just been interned" from "was already in `tcx`",
     // so we track this in a separate set.
-    let mut just_interned = FxHashSet::default();
+    let mut just_interned: FxHashSet<_> = std::iter::once(base_alloc_id).collect();
     // Whether we encountered a bad mutable pointer.
     // We want to first report "dangling" and then "mutable", so we need to delay reporting these
     // errors.
@@ -147,52 +146,56 @@ pub fn intern_const_alloc_recursive<
     // raw pointers, so we cannot rely on validation to catch them -- and since interning runs
     // before validation, and interning doesn't know the type of anything, this means we can't show
     // better errors. Maybe we should consider doing validation before interning in the future.
-    while let Some((alloc_id, mutability)) = todo.pop() {
+    while let Some(prov) = todo.pop() {
+        let alloc_id = prov.alloc_id();
+        // Crucially, we check this *before* checking whether the `alloc_id`
+        // has already been interned. The point of this check is to ensure that when
+        // there are multiple pointers to the same allocation, they are *all* immutable.
+        // Therefore it would be bad if we only checked the first pointer to any given
+        // allocation.
+        // (It is likely not possible to actually have multiple pointers to the same allocation,
+        // so alternatively we could also check that and ICE if there are multiple such pointers.)
+        if intern_kind != InternKind::Promoted
+            && inner_mutability == Mutability::Not
+            && !prov.immutable()
+        {
+            if ecx.tcx.try_get_global_alloc(alloc_id).is_some()
+                && !just_interned.contains(&alloc_id)
+            {
+                // This is a pointer to some memory from another constant. We encounter mutable
+                // pointers to such memory since we do not always track immutability through
+                // these "global" pointers. Allowing them is harmless; the point of these checks
+                // during interning is to justify why we intern the *new* allocations immutably,
+                // so we can completely ignore existing allocations. We also don't need to add
+                // this to the todo list, since after all it is already interned.
+                continue;
+            }
+            // Found a mutable pointer inside a const where inner allocations should be
+            // immutable. We exclude promoteds from this, since things like `&mut []` and
+            // `&None::<Cell<i32>>` lead to promotion that can produce mutable pointers. We rely
+            // on the promotion analysis not screwing up to ensure that it is sound to intern
+            // promoteds as immutable.
+            found_bad_mutable_pointer = true;
+        }
         if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
             // Already interned.
             debug_assert!(!ecx.memory.alloc_map.contains_key(&alloc_id));
             continue;
         }
         just_interned.insert(alloc_id);
-        intern_shallow(ecx, alloc_id, mutability, |ecx, prov| {
-            let alloc_id = prov.alloc_id();
-            if intern_kind != InternKind::Promoted
-                && inner_mutability == Mutability::Not
-                && !prov.immutable()
-            {
-                if ecx.tcx.try_get_global_alloc(alloc_id).is_some()
-                    && !just_interned.contains(&alloc_id)
-                {
-                    // This is a pointer to some memory from another constant. We encounter mutable
-                    // pointers to such memory since we do not always track immutability through
-                    // these "global" pointers. Allowing them is harmless; the point of these checks
-                    // during interning is to justify why we intern the *new* allocations immutably,
-                    // so we can completely ignore existing allocations. We also don't need to add
-                    // this to the todo list, since after all it is already interned.
-                    return;
-                }
-                // Found a mutable pointer inside a const where inner allocations should be
-                // immutable. We exclude promoteds from this, since things like `&mut []` and
-                // `&None::<Cell<i32>>` lead to promotion that can produce mutable pointers. We rely
-                // on the promotion analysis not screwing up to ensure that it is sound to intern
-                // promoteds as immutable.
-                found_bad_mutable_pointer = true;
-            }
-            // We always intern with `inner_mutability`, and furthermore we ensured above that if
-            // that is "immutable", then there are *no* mutable pointers anywhere in the newly
-            // interned memory -- justifying that we can indeed intern immutably. However this also
-            // means we can *not* easily intern immutably here if `prov.immutable()` is true and
-            // `inner_mutability` is `Mut`: there might be other pointers to that allocation, and
-            // we'd have to somehow check that they are *all* immutable before deciding that this
-            // allocation can be made immutable. In the future we could consider analyzing all
-            // pointers before deciding which allocations can be made immutable; but for now we are
-            // okay with losing some potential for immutability here. This can anyway only affect
-            // `static mut`.
-            todo.push((alloc_id, inner_mutability));
-        })
-        .map_err(|()| {
+        // We always intern with `inner_mutability`, and furthermore we ensured above that if
+        // that is "immutable", then there are *no* mutable pointers anywhere in the newly
+        // interned memory -- justifying that we can indeed intern immutably. However this also
+        // means we can *not* easily intern immutably here if `prov.immutable()` is true and
+        // `inner_mutability` is `Mut`: there might be other pointers to that allocation, and
+        // we'd have to somehow check that they are *all* immutable before deciding that this
+        // allocation can be made immutable. In the future we could consider analyzing all
+        // pointers before deciding which allocations can be made immutable; but for now we are
+        // okay with losing some potential for immutability here. This can anyway only affect
+        // `static mut`.
+        todo.extend(intern_shallow(ecx, alloc_id, inner_mutability).map_err(|()| {
             ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
-        })?;
+        })?);
     }
     if found_bad_mutable_pointer {
         return Err(ecx
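With `intern_shallow` reporting nested allocation ids, the rewritten loop above is a plain worklist traversal: pop an id, intern it shallowly, and push whatever it references. A self-contained sketch of that structure, again in toy types rather than the rustc ones:

    use std::collections::HashMap;

    type AllocId = u32;

    struct Alloc {
        provenance: Vec<AllocId>,
    }

    struct Memory {
        alloc_map: HashMap<AllocId, Alloc>, // live allocations in the machine
        interned: HashMap<AllocId, Alloc>,  // stand-in for the global `tcx` map
    }

    // Shallow step: move one allocation out of the machine and report the
    // ids it references so the caller can recurse.
    fn intern_shallow(mem: &mut Memory, id: AllocId) -> Result<Vec<AllocId>, ()> {
        let alloc = mem.alloc_map.remove(&id).ok_or(())?; // dangling pointer otherwise
        let children = alloc.provenance.clone();
        mem.interned.insert(id, alloc);
        Ok(children)
    }

    // Recursive driver: intern the base allocation first (in the real code it
    // may use a different mutability), then drain the worklist. The real loop
    // additionally tracks a `just_interned` set for its mutability diagnostics.
    fn intern_recursive(mem: &mut Memory, base: AllocId) -> Result<(), ()> {
        let mut todo: Vec<AllocId> = intern_shallow(mem, base)?;
        while let Some(id) = todo.pop() {
            if mem.interned.contains_key(&id) {
                continue; // already interned, now or by an earlier constant
            }
            todo.extend(intern_shallow(mem, id)?);
        }
        Ok(())
    }

    fn main() {
        let mut mem = Memory {
            alloc_map: HashMap::from([
                (1, Alloc { provenance: vec![2] }),
                (2, Alloc { provenance: vec![] }),
            ]),
            interned: HashMap::new(),
        };
        intern_recursive(&mut mem, 1).unwrap();
        assert!(mem.alloc_map.is_empty());
    }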
@@ -220,13 +223,13 @@ pub fn intern_const_alloc_for_constprop<
         return Ok(());
     }
     // Move allocation to `tcx`.
-    intern_shallow(ecx, alloc_id, Mutability::Not, |_ecx, _| {
+    for _ in intern_shallow(ecx, alloc_id, Mutability::Not).map_err(|()| err_ub!(DeadLocal))? {
         // We are not doing recursive interning, so we don't currently support provenance.
         // (If this assertion ever triggers, we should just implement a
         // proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
         panic!("`intern_const_alloc_for_constprop` called on allocation with nested provenance")
-    })
-    .map_err(|()| err_ub!(DeadLocal).into())
+    }
+    Ok(())
 }
 
 impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
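This hunk, and the `intern_with_temp_alloc` hunk below, replace their dummy callbacks with the same small idiom: drain the returned iterator and treat any yielded item as a bug, i.e. assert that there are no nested allocations. In miniature, with a hypothetical helper (the rustc code writes the `for` loop inline at both call sites):

    // Assert that an iterator is empty by draining it.
    fn assert_no_nested<T: std::fmt::Debug>(iter: impl Iterator<Item = T>) {
        for item in iter {
            panic!("unexpected nested allocation: {item:?}");
        }
    }

    fn main() {
        assert_no_nested(std::iter::empty::<u32>()); // ok: nothing nested
    }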
@@ -247,15 +250,14 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
         let dest = self.allocate(layout, MemoryKind::Stack)?;
         f(self, &dest.clone().into())?;
         let alloc_id = dest.ptr().provenance.unwrap().alloc_id(); // this was just allocated, it must have provenance
-        intern_shallow(self, alloc_id, Mutability::Not, |ecx, prov| {
+        for prov in intern_shallow(self, alloc_id, Mutability::Not).unwrap() {
             // We are not doing recursive interning, so we don't currently support provenance.
             // (If this assertion ever triggers, we should just implement a
             // proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
-            if !ecx.tcx.try_get_global_alloc(prov.alloc_id()).is_some() {
+            if !self.tcx.try_get_global_alloc(prov.alloc_id()).is_some() {
                 panic!("`intern_with_temp_alloc` with nested allocations");
             }
-        })
-        .unwrap();
+        }
         Ok(alloc_id)
     }
 }