
Turn tcx.vtable_allocation() into a query.

Michael Woerister 2021-10-07 11:29:01 +02:00 committed by Michael Woerister
parent a1e2c0f0ad
commit b7cc99142a
9 changed files with 101 additions and 72 deletions
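For readers unfamiliar with rustc's query system: a query is a memoized computation on TyCtxt whose reads are recorded as edges in the incremental dependency graph, which is precisely what the hand-rolled vtable cache being replaced here did not do. The sketch below is illustrative only, using stand-in types rather than rustc's real API; the one detail it shares with the actual change is the call shape, where the two former arguments are packed into a single tuple key.

// Self-contained sketch (stand-in types, not rustc code) of what a
// query provides: memoization plus recorded reads.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Ty(u32); // stand-in for Ty<'tcx>
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct TraitRef(u32); // stand-in for ty::PolyExistentialTraitRef<'tcx>
type AllocId = u64;

#[derive(Default)]
struct Tcx {
    cache: HashMap<(Ty, Option<TraitRef>), AllocId>,
    dep_edges: Vec<String>, // stand-in for the dependency graph
}

impl Tcx {
    // Queries take exactly one key, hence the tuple.
    fn vtable_allocation(&mut self, key: (Ty, Option<TraitRef>)) -> AllocId {
        // Unlike a plain side-table cache, every call records a
        // dependency edge, even on a cache hit.
        self.dep_edges.push(format!("read vtable_allocation({:?})", key));
        if let Some(&id) = self.cache.get(&key) {
            return id;
        }
        let id = Self::provider(key); // compute on cache miss
        self.cache.insert(key, id);
        id
    }

    // Stand-in for vtable_allocation_provider.
    fn provider((Ty(t), trait_ref): (Ty, Option<TraitRef>)) -> AllocId {
        u64::from(t) * 100 + trait_ref.map_or(0, |TraitRef(r)| u64::from(r))
    }
}

fn main() {
    let mut tcx = Tcx::default();
    let a = tcx.vtable_allocation((Ty(1), Some(TraitRef(7))));
    let b = tcx.vtable_allocation((Ty(1), Some(TraitRef(7)))); // memoized
    assert_eq!(a, b);
    assert_eq!(tcx.dep_edges.len(), 2); // both reads were tracked
}

In the real change, the memoization, dep-edge recording, and provider dispatch are all generated by the rustc_queries! machinery shown in the hunks below.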


@@ -68,7 +68,7 @@ pub(crate) fn get_vtable<'tcx>(
ty: Ty<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> Value {
-let alloc_id = fx.tcx.vtable_allocation(ty, trait_ref);
+let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
let data_id =
data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, Mutability::Not);
let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);


@@ -72,7 +72,7 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
return val;
}
-let vtable_alloc_id = tcx.vtable_allocation(ty, trait_ref);
+let vtable_alloc_id = tcx.vtable_allocation((ty, trait_ref));
let vtable_allocation = tcx.global_alloc(vtable_alloc_id).unwrap_memory();
let vtable_const = cx.const_data_from_alloc(vtable_allocation);
let align = cx.data_layout().pointer_align.abi;


@@ -30,7 +30,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ensure_monomorphic_enough(*self.tcx, ty)?;
ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
-let vtable_allocation = self.tcx.vtable_allocation(ty, poly_trait_ref);
+let vtable_allocation = self.tcx.vtable_allocation((ty, poly_trait_ref));
let vtable_ptr = self.memory.global_base_pointer(Pointer::from(vtable_allocation))?;


@@ -1012,6 +1012,13 @@ rustc_queries! {
key.1, key.0 }
}
+query vtable_allocation(key: (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)) -> mir::interpret::AllocId {
+    desc { |tcx| "vtable const allocation for <{} as {}>",
+        key.0,
+        key.1.map(|trait_ref| format!("{}", trait_ref)).unwrap_or("_".to_owned())
+    }
+}
query codegen_fulfill_obligation(
key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)
) -> Result<ImplSource<'tcx, ()>, ErrorReported> {


@@ -2050,6 +2050,7 @@ pub fn provide(providers: &mut ty::query::Providers) {
trait_impls_of: trait_def::trait_impls_of_provider,
type_uninhabited_from: inhabitedness::type_uninhabited_from,
const_param_default: consts::const_param_default,
+vtable_allocation: vtable::vtable_allocation_provider,
..*providers
};
}


@@ -2192,6 +2192,7 @@ forward_display_to_print! {
// because `for<'tcx>` isn't possible yet.
ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>,
ty::Binder<'tcx, ty::TraitRef<'tcx>>,
+ty::Binder<'tcx, ty::ExistentialTraitRef<'tcx>>,
ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>>,
ty::Binder<'tcx, ty::FnSig<'tcx>>,
ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,


@@ -43,77 +43,72 @@ pub const COMMON_VTABLE_ENTRIES_DROPINPLACE: usize = 0;
pub const COMMON_VTABLE_ENTRIES_SIZE: usize = 1;
pub const COMMON_VTABLE_ENTRIES_ALIGN: usize = 2;
-impl<'tcx> TyCtxt<'tcx> {
-    /// Retrieves an allocation that represents the contents of a vtable.
-    /// There's a cache within `TyCtxt` so it will be deduplicated.
-    pub fn vtable_allocation(
-        self,
-        ty: Ty<'tcx>,
-        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
-    ) -> AllocId {
-        let tcx = self;
+/// Retrieves an allocation that represents the contents of a vtable.
+/// Since this is a query, allocations are cached and not duplicated.
+pub(super) fn vtable_allocation_provider<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    key: (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>),
+) -> AllocId {
+    let (ty, poly_trait_ref) = key;

    let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
        let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
        let trait_ref = tcx.erase_regions(trait_ref);

        tcx.vtable_entries(trait_ref)
    } else {
        COMMON_VTABLE_ENTRIES
    };

    let layout = tcx
        .layout_of(ty::ParamEnv::reveal_all().and(ty))
        .expect("failed to build vtable representation");
    assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
    let size = layout.size.bytes();
    let align = layout.align.abi.bytes();

    let ptr_size = tcx.data_layout.pointer_size;
    let ptr_align = tcx.data_layout.pointer_align.abi;

    let vtable_size = ptr_size * u64::try_from(vtable_entries.len()).unwrap();
    let mut vtable = Allocation::uninit(vtable_size, ptr_align, /* panic_on_fail */ true).unwrap();

    // No need to do any alignment checks on the memory accesses below, because we know the
    // allocation is correctly aligned as we created it above. Also we're only offsetting by
    // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.

    for (idx, entry) in vtable_entries.iter().enumerate() {
        let idx: u64 = u64::try_from(idx).unwrap();
        let scalar = match entry {
            VtblEntry::MetadataDropInPlace => {
                let instance = ty::Instance::resolve_drop_in_place(tcx, ty);
                let fn_alloc_id = tcx.create_fn_alloc(instance);
                let fn_ptr = Pointer::from(fn_alloc_id);
                ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
            }
            VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_size).into(),
            VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_size).into(),
            VtblEntry::Vacant => continue,
            VtblEntry::Method(instance) => {
                // Prepare the fn ptr we write into the vtable.
                let instance = instance.polymorphize(tcx);
                let fn_alloc_id = tcx.create_fn_alloc(instance);
                let fn_ptr = Pointer::from(fn_alloc_id);
                ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
            }
            VtblEntry::TraitVPtr(trait_ref) => {
                let super_trait_ref = trait_ref
                    .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
-                let supertrait_alloc_id = self.vtable_allocation(ty, Some(super_trait_ref));
+                let supertrait_alloc_id = tcx.vtable_allocation((ty, Some(super_trait_ref)));
                let vptr = Pointer::from(supertrait_alloc_id);
                ScalarMaybeUninit::from_pointer(vptr, &tcx)
            }
        };
        vtable
            .write_scalar(&tcx, alloc_range(ptr_size * idx, ptr_size), scalar)
            .expect("failed to build vtable representation");
    }

    vtable.mutability = Mutability::Not;
    tcx.create_memory_alloc(tcx.intern_const_alloc(vtable))
}
-}


@@ -72,6 +72,17 @@ impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
}
}
+impl<'tcx> Key for (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>) {
+    #[inline(always)]
+    fn query_crate_is_local(&self) -> bool {
+        true
+    }
+
+    fn default_span(&self, _: TyCtxt<'_>) -> Span {
+        DUMMY_SP
+    }
+}
impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
#[inline(always)]
fn query_crate_is_local(&self) -> bool {


@@ -1,5 +1,10 @@
// revisions:rpass1 rpass2
+// This test case makes sure that re-ordering the methods in a vtable will
+// trigger recompilation of codegen units that instantiate it.
+//
+// See https://github.com/rust-lang/rust/issues/89598
trait Foo {
#[cfg(rpass1)]
fn method1(&self) -> u32;
@@ -16,12 +21,21 @@ impl Foo for u32 {
}
fn main() {
+    // Before #89598 was fixed, the vtable allocation would be cached during
+    // a MIR optimization pass and then the codegen pass for the main object
+    // file would not register a dependency on it (because of the missing
+    // dep-tracking).
+    //
+    // In the rpass2 session, the main object file would not be re-compiled,
+    // thus the mod1::foo(x) call would pass in an outdated vtable, while the
+    // mod1 object would expect the new, re-ordered vtable, resulting in a
+    // call to the wrong method.
let x: &dyn Foo = &0u32;
assert_eq!(mod1::foo(x), 17);
}
mod mod1 {
-pub fn foo(x: &dyn super::Foo) -> u32 {
+pub(super) fn foo(x: &dyn super::Foo) -> u32 {
x.method1()
}
}
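To make the failure mode concrete, here is a self-contained simulation (illustrative only, not the actual test or codegen) of dispatching through an outdated vtable after the slot order changed. The value 42 for method2 is an assumption; 17 matches the assertion in the test above.

// Simulation of the bug: an out-of-date vtable makes dynamic dispatch
// hit the wrong slot.
fn method1() -> u32 { 17 }
fn method2() -> u32 { 42 }

fn main() {
    // rpass1 vtable slot order: [method1, method2]
    let outdated_vtable: [fn() -> u32; 2] = [method1, method2];
    // rpass2 slot order after the re-ordering: [method2, method1]
    let current_vtable: [fn() -> u32; 2] = [method2, method1];

    // mod1 is compiled against the rpass2 layout: method1 lives in slot 1.
    let method1_slot = 1;
    assert_eq!(current_vtable[method1_slot](), 17); // up-to-date vtable: ok

    // main's object file, if not re-compiled, still embeds the rpass1
    // vtable; the same slot now dispatches to the wrong method.
    assert_eq!(outdated_vtable[method1_slot](), 42); // the bug
}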