interpret: refactor projection code to work on a common trait, and use that for visitors

This commit is contained in:
Ralf Jung 2023-07-24 11:44:58 +02:00
parent a593de4fab
commit a2bcafa500
44 changed files with 863 additions and 1210 deletions
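
For orientation before the per-file diffs: the refactor replaces the per-type projection helpers (`mplace_field`/`operand_field`/`place_field`, and the matching `_index` and `_downcast` variants) with single generic methods — `project_field`, `project_index`, `project_downcast` — that accept any type implementing the new `Projectable` trait, so `MPlaceTy`, `PlaceTy`, `OpTy`, and `ImmTy` share one set of projection entry points. Below is a minimal, self-contained sketch of that pattern; the types (`Layout`, `Place`, `Imm`) and the cut-down trait are illustrative stand-ins, not the real interpreter types, and the sketch is not part of the commit.

// Illustrative stand-ins only: a cut-down `Projectable` showing why one generic
// helper can replace three near-identical per-type helpers.
#[derive(Clone, Copy, Debug)]
struct Layout {
    size: u64,
    field_offsets: &'static [u64],
}

trait Projectable: Sized {
    fn layout(&self) -> Layout;
    fn offset(&self, offset: u64, layout: Layout) -> Self;
}

// Stand-in for an in-memory place (cf. MPlaceTy): projecting bumps the address.
#[derive(Clone, Copy, Debug)]
struct Place {
    addr: u64,
    layout: Layout,
}

impl Projectable for Place {
    fn layout(&self) -> Layout {
        self.layout
    }
    fn offset(&self, offset: u64, layout: Layout) -> Self {
        Place { addr: self.addr + offset, layout }
    }
}

// Stand-in for an immediate value (cf. ImmTy/OpTy): projecting slices the bytes.
#[derive(Clone, Debug)]
struct Imm {
    bytes: Vec<u8>,
    layout: Layout,
}

impl Projectable for Imm {
    fn layout(&self) -> Layout {
        self.layout
    }
    fn offset(&self, offset: u64, layout: Layout) -> Self {
        let start = offset as usize;
        let end = start + layout.size as usize;
        Imm { bytes: self.bytes[start..end].to_vec(), layout }
    }
}

// Written once, works for every Projectable — the role played by
// `InterpCx::project_field` in the diff below.
fn project_field<P: Projectable>(base: &P, field: usize, field_layout: Layout) -> P {
    let offset = base.layout().field_offsets[field];
    base.offset(offset, field_layout)
}

fn main() {
    let u32_layout = Layout { size: 4, field_offsets: &[] };
    let pair_layout = Layout { size: 8, field_offsets: &[0, 4] };

    let place = Place { addr: 0x1000, layout: pair_layout };
    let imm = Imm { bytes: vec![1, 0, 0, 0, 2, 0, 0, 0], layout: pair_layout };

    // Same generic helper for both flavors of "thing with a layout".
    println!("{:?}", project_field(&place, 1, u32_layout)); // Place { addr: 0x1004, .. }
    println!("{:?}", project_field(&imm, 1, u32_layout));   // Imm { bytes: [2, 0, 0, 0], .. }
}

Writing the projection logic once against the trait is also what the commit title refers to with "use that for visitors".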

View file

@ -1189,7 +1189,7 @@ impl FieldsShape {
} }
FieldsShape::Array { stride, count } => { FieldsShape::Array { stride, count } => {
let i = u64::try_from(i).unwrap(); let i = u64::try_from(i).unwrap();
assert!(i < count); assert!(i < count, "tried to access field {} of array with {} fields", i, count);
stride * i stride * i
} }
FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)], FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],

View file

@ -408,8 +408,11 @@ const_eval_undefined_behavior =
const_eval_undefined_behavior_note = const_eval_undefined_behavior_note =
The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
const_eval_uninhabited_enum_tag = {$front_matter}: encountered an uninhabited enum variant
const_eval_uninhabited_enum_variant_read =
read discriminant of an uninhabited enum variant
const_eval_uninhabited_enum_variant_written = const_eval_uninhabited_enum_variant_written =
writing discriminant of an uninhabited enum writing discriminant of an uninhabited enum variant
const_eval_uninhabited_val = {$front_matter}: encountered a value of uninhabited type `{$ty}` const_eval_uninhabited_val = {$front_matter}: encountered a value of uninhabited type `{$ty}`
const_eval_uninit = {$front_matter}: encountered uninitialized bytes const_eval_uninit = {$front_matter}: encountered uninitialized bytes
const_eval_uninit_bool = {$front_matter}: encountered uninitialized memory, but expected a boolean const_eval_uninit_bool = {$front_matter}: encountered uninitialized memory, but expected a boolean

View file

@ -102,7 +102,7 @@ pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
} }
ty::Adt(def, _) => { ty::Adt(def, _) => {
let variant = ecx.read_discriminant(&op).ok()?.1; let variant = ecx.read_discriminant(&op).ok()?.1;
let down = ecx.operand_downcast(&op, variant).ok()?; let down = ecx.project_downcast(&op, variant).ok()?;
(def.variants()[variant].fields.len(), Some(variant), down) (def.variants()[variant].fields.len(), Some(variant), down)
} }
ty::Tuple(args) => (args.len(), None, op), ty::Tuple(args) => (args.len(), None, op),
@ -111,7 +111,7 @@ pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
let fields_iter = (0..field_count) let fields_iter = (0..field_count)
.map(|i| { .map(|i| {
let field_op = ecx.operand_field(&down, i).ok()?; let field_op = ecx.project_field(&down, i).ok()?;
let val = op_to_const(&ecx, &field_op); let val = op_to_const(&ecx, &field_op);
Some((val, field_op.layout.ty)) Some((val, field_op.layout.ty))
}) })

View file

@ -2,11 +2,11 @@ use super::eval_queries::{mk_eval_cx, op_to_const};
use super::machine::CompileTimeEvalContext; use super::machine::CompileTimeEvalContext;
use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES}; use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
use crate::const_eval::CanAccessStatics; use crate::const_eval::CanAccessStatics;
use crate::interpret::MPlaceTy;
use crate::interpret::{ use crate::interpret::{
intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta, intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
MemoryKind, PlaceTy, Scalar, MemoryKind, PlaceTy, Projectable, Scalar,
}; };
use crate::interpret::{MPlaceTy, Value};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt}; use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_span::source_map::DUMMY_SP; use rustc_span::source_map::DUMMY_SP;
use rustc_target::abi::{Align, FieldIdx, VariantIdx, FIRST_VARIANT}; use rustc_target::abi::{Align, FieldIdx, VariantIdx, FIRST_VARIANT};
@ -20,7 +20,7 @@ fn branches<'tcx>(
num_nodes: &mut usize, num_nodes: &mut usize,
) -> ValTreeCreationResult<'tcx> { ) -> ValTreeCreationResult<'tcx> {
let place = match variant { let place = match variant {
Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(), Some(variant) => ecx.project_downcast(place, variant).unwrap(),
None => *place, None => *place,
}; };
let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32())))); let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
@ -28,7 +28,7 @@ fn branches<'tcx>(
let mut fields = Vec::with_capacity(n); let mut fields = Vec::with_capacity(n);
for i in 0..n { for i in 0..n {
let field = ecx.mplace_field(&place, i).unwrap(); let field = ecx.project_field(&place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?; let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
fields.push(Some(valtree)); fields.push(Some(valtree));
} }
@ -55,13 +55,11 @@ fn slice_branches<'tcx>(
place: &MPlaceTy<'tcx>, place: &MPlaceTy<'tcx>,
num_nodes: &mut usize, num_nodes: &mut usize,
) -> ValTreeCreationResult<'tcx> { ) -> ValTreeCreationResult<'tcx> {
let n = place let n = place.len(ecx).unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
.len(&ecx.tcx.tcx)
.unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
let mut elems = Vec::with_capacity(n as usize); let mut elems = Vec::with_capacity(n as usize);
for i in 0..n { for i in 0..n {
let place_elem = ecx.mplace_index(place, i).unwrap(); let place_elem = ecx.project_index(place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?; let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
elems.push(valtree); elems.push(valtree);
} }
@ -386,7 +384,7 @@ fn valtree_into_mplace<'tcx>(
debug!(?variant); debug!(?variant);
( (
place.project_downcast(ecx, variant_idx).unwrap(), ecx.project_downcast(place, variant_idx).unwrap(),
&branches[1..], &branches[1..],
Some(variant_idx), Some(variant_idx),
) )
@ -401,7 +399,7 @@ fn valtree_into_mplace<'tcx>(
debug!(?i, ?inner_valtree); debug!(?i, ?inner_valtree);
let mut place_inner = match ty.kind() { let mut place_inner = match ty.kind() {
ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(), ty::Str | ty::Slice(_) => ecx.project_index(place, i as u64).unwrap(),
_ if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty()) _ if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty())
&& i == branches.len() - 1 => && i == branches.len() - 1 =>
{ {
@ -441,7 +439,7 @@ fn valtree_into_mplace<'tcx>(
) )
.unwrap() .unwrap()
} }
_ => ecx.mplace_field(&place_adjusted, i).unwrap(), _ => ecx.project_field(&place_adjusted, i).unwrap(),
}; };
debug!(?place_inner); debug!(?place_inner);

View file

@ -511,7 +511,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
InvalidUninitBytes(Some(_)) => const_eval_invalid_uninit_bytes, InvalidUninitBytes(Some(_)) => const_eval_invalid_uninit_bytes,
DeadLocal => const_eval_dead_local, DeadLocal => const_eval_dead_local,
ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch, ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch,
UninhabitedEnumVariantWritten => const_eval_uninhabited_enum_variant_written, UninhabitedEnumVariantWritten(_) => const_eval_uninhabited_enum_variant_written,
UninhabitedEnumVariantRead(_) => const_eval_uninhabited_enum_variant_read,
Validation(e) => e.diagnostic_message(), Validation(e) => e.diagnostic_message(),
Custom(x) => (x.msg)(), Custom(x) => (x.msg)(),
} }
@ -535,7 +536,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
| InvalidMeta(InvalidMetaKind::TooBig) | InvalidMeta(InvalidMetaKind::TooBig)
| InvalidUninitBytes(None) | InvalidUninitBytes(None)
| DeadLocal | DeadLocal
| UninhabitedEnumVariantWritten => {} | UninhabitedEnumVariantWritten(_)
| UninhabitedEnumVariantRead(_) => {}
BoundsCheckFailed { len, index } => { BoundsCheckFailed { len, index } => {
builder.set_arg("len", len); builder.set_arg("len", len);
builder.set_arg("index", index); builder.set_arg("index", index);
@ -623,6 +625,7 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
UnsafeCell => const_eval_unsafe_cell, UnsafeCell => const_eval_unsafe_cell,
UninhabitedVal { .. } => const_eval_uninhabited_val, UninhabitedVal { .. } => const_eval_uninhabited_val,
InvalidEnumTag { .. } => const_eval_invalid_enum_tag, InvalidEnumTag { .. } => const_eval_invalid_enum_tag,
UninhabitedEnumTag => const_eval_uninhabited_enum_tag,
UninitEnumTag => const_eval_uninit_enum_tag, UninitEnumTag => const_eval_uninit_enum_tag,
UninitStr => const_eval_uninit_str, UninitStr => const_eval_uninit_str,
Uninit { expected: ExpectedKind::Bool } => const_eval_uninit_bool, Uninit { expected: ExpectedKind::Bool } => const_eval_uninit_bool,
@ -760,7 +763,8 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> {
| InvalidMetaSliceTooLarge { .. } | InvalidMetaSliceTooLarge { .. }
| InvalidMetaTooLarge { .. } | InvalidMetaTooLarge { .. }
| DanglingPtrUseAfterFree { .. } | DanglingPtrUseAfterFree { .. }
| DanglingPtrOutOfBounds { .. } => {} | DanglingPtrOutOfBounds { .. }
| UninhabitedEnumTag => {}
} }
} }
} }
@ -835,7 +839,9 @@ impl<'tcx> ReportErrorExt for InvalidProgramInfo<'tcx> {
rustc_middle::error::middle_adjust_for_foreign_abi_error rustc_middle::error::middle_adjust_for_foreign_abi_error
} }
InvalidProgramInfo::SizeOfUnsizedType(_) => const_eval_size_of_unsized, InvalidProgramInfo::SizeOfUnsizedType(_) => const_eval_size_of_unsized,
InvalidProgramInfo::ConstPropNonsense => panic!("We had const-prop nonsense, this should never be printed"), InvalidProgramInfo::ConstPropNonsense => {
panic!("We had const-prop nonsense, this should never be printed")
}
} }
} }
fn add_args<G: EmissionGuarantee>( fn add_args<G: EmissionGuarantee>(

View file

@ -420,8 +420,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if cast_ty_field.is_zst() { if cast_ty_field.is_zst() {
continue; continue;
} }
let src_field = self.operand_field(src, i)?; let src_field = self.project_field(src, i)?;
let dst_field = self.place_field(dest, i)?; let dst_field = self.project_field(dest, i)?;
if src_field.layout.ty == cast_ty_field.ty { if src_field.layout.ty == cast_ty_field.ty {
self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?; self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
} else { } else {

View file

@ -22,7 +22,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// When evaluating we will always error before even getting here, but ConstProp 'executes' // When evaluating we will always error before even getting here, but ConstProp 'executes'
// dead code, so we cannot ICE here. // dead code, so we cannot ICE here.
if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() { if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
throw_ub!(UninhabitedEnumVariantWritten) throw_ub!(UninhabitedEnumVariantWritten(variant_index))
} }
match dest.layout.variants { match dest.layout.variants {
@ -47,7 +47,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = tag_layout.size(self); let size = tag_layout.size(self);
let tag_val = size.truncate(discr_val); let tag_val = size.truncate(discr_val);
let tag_dest = self.place_field(dest, tag_field)?; let tag_dest = self.project_field(dest, tag_field)?;
self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?; self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
} }
abi::Variants::Multiple { abi::Variants::Multiple {
@ -78,7 +78,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&niche_start_val, &niche_start_val,
)?; )?;
// Write result. // Write result.
let niche_dest = self.place_field(dest, tag_field)?; let niche_dest = self.project_field(dest, tag_field)?;
self.write_immediate(*tag_val, &niche_dest)?; self.write_immediate(*tag_val, &niche_dest)?;
} }
} }
@ -106,6 +106,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`). // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants { let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
Variants::Single { index } => { Variants::Single { index } => {
// Hilariously, `Single` is used even for 0-variant enums.
// (See https://github.com/rust-lang/rust/issues/89765).
if matches!(op.layout.ty.kind(), ty::Adt(def, ..) if def.variants().is_empty()) {
throw_ub!(UninhabitedEnumVariantRead(index))
}
let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) { let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
Some(discr) => { Some(discr) => {
// This type actually has discriminants. // This type actually has discriminants.
@ -118,6 +123,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Scalar::from_uint(index.as_u32(), discr_layout.size) Scalar::from_uint(index.as_u32(), discr_layout.size)
} }
}; };
// For consistency with `write_discriminant`, and to make sure that
// `project_downcast` cannot fail due to strange layouts, we declare immediate UB
// for uninhabited variants.
if op.layout.ty.is_enum() && op.layout.for_variant(self, index).abi.is_uninhabited() {
throw_ub!(UninhabitedEnumVariantRead(index))
}
return Ok((discr, index)); return Ok((discr, index));
} }
Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => { Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
@ -138,13 +149,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?; let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`. // Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?; let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size); assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed()); assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
trace!("tag value: {}", tag_val); trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to. // Figure out which discriminant and variant this corresponds to.
Ok(match *tag_encoding { let (discr, index) = match *tag_encoding {
TagEncoding::Direct => { TagEncoding::Direct => {
let scalar = tag_val.to_scalar(); let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer. // Generate a specific error if `tag_val` is not an integer.
@ -232,6 +243,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// encoded in the tag. // encoded in the tag.
(Scalar::from_uint(variant.as_u32(), discr_layout.size), variant) (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
} }
}) };
// For consistency with `write_discriminant`, and to make sure that `project_downcast` cannot fail due to strange layouts, we declare immediate UB for uninhabited variants.
if op.layout.for_variant(self, index).abi.is_uninhabited() {
throw_ub!(UninhabitedEnumVariantRead(index))
}
Ok((discr, index))
} }
} }

View file

@ -164,75 +164,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
&self.ecx &self.ecx
} }
fn visit_aggregate(
&mut self,
mplace: &MPlaceTy<'tcx>,
fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
// We want to walk the aggregate to look for references to intern. While doing that we
// also need to take special care of interior mutability.
//
// As an optimization, however, if the allocation does not contain any references: we don't
// need to do the walk. It can be costly for big arrays for example (e.g. issue #93215).
let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
// ZSTs cannot contain pointers, we can avoid the interning walk.
if mplace.layout.is_zst() {
return Ok(false);
}
// Now, check whether this allocation could contain references.
//
// Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
// to avoid could be expensive: on the potentially larger types, arrays and slices,
// rather than on all aggregates unconditionally.
if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
// We do the walk if we can't determine the size of the mplace: we may be
// dealing with extern types here in the future.
return Ok(true);
};
// If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
if !alloc.has_provenance() {
return Ok(false);
}
} else {
// We're encountering a ZST here, and can avoid the walk as well.
return Ok(false);
}
}
// In the general case, we do the walk.
Ok(true)
};
// If this allocation contains no references to intern, we avoid the potentially costly
// walk.
//
// We can do this before the checks for interior mutability below, because only references
// are relevant in that situation, and we're checking if there are any here.
if !is_walk_needed(mplace)? {
return Ok(());
}
if let Some(def) = mplace.layout.ty.ty_adt_def() {
if def.is_unsafe_cell() {
// We are crossing over an `UnsafeCell`, we can mutate again. This means that
// References we encounter inside here are interned as pointing to mutable
// allocations.
// Remember the `old` value to handle nested `UnsafeCell`.
let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
let walked = self.walk_aggregate(mplace, fields);
self.inside_unsafe_cell = old;
return walked;
}
}
self.walk_aggregate(mplace, fields)
}
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> { fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
// Handle Reference types, as these are the only types with provenance supported by const eval. // Handle Reference types, as these are the only types with provenance supported by const eval.
// Raw pointers (and boxes) are handled by the `leftover_allocations` logic. // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
@ -315,7 +246,63 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
} }
Ok(()) Ok(())
} else { } else {
// Not a reference -- proceed recursively. // Not a reference. Check if we want to recurse.
let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
// ZSTs cannot contain pointers, we can avoid the interning walk.
if mplace.layout.is_zst() {
return Ok(false);
}
// Now, check whether this allocation could contain references.
//
// Note, this check may sometimes not be cheap, so we only do it when the walk we'd like
// to avoid could be expensive: on the potentially larger types, arrays and slices,
// rather than on all aggregates unconditionally.
if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
// We do the walk if we can't determine the size of the mplace: we may be
// dealing with extern types here in the future.
return Ok(true);
};
// If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
if !alloc.has_provenance() {
return Ok(false);
}
} else {
// We're encountering a ZST here, and can avoid the walk as well.
return Ok(false);
}
}
// In the general case, we do the walk.
Ok(true)
};
// If this allocation contains no references to intern, we avoid the potentially costly
// walk.
//
// We can do this before the checks for interior mutability below, because only references
// are relevant in that situation, and we're checking if there are any here.
if !is_walk_needed(mplace)? {
return Ok(());
}
if let Some(def) = mplace.layout.ty.ty_adt_def() {
if def.is_unsafe_cell() {
// We are crossing over an `UnsafeCell`, we can mutate again. This means that
// References we encounter inside here are interned as pointing to mutable
// allocations.
// Remember the `old` value to handle nested `UnsafeCell`.
let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
let walked = self.walk_value(mplace);
self.inside_unsafe_cell = old;
return walked;
}
}
self.walk_value(mplace) self.walk_value(mplace)
} }
} }

View file

@ -425,11 +425,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
); );
for i in 0..dest_len { for i in 0..dest_len {
let place = self.mplace_index(&dest, i)?; let place = self.project_index(&dest, i)?;
let value = if i == index { let value = if i == index {
elem.clone() elem.clone()
} else { } else {
self.mplace_index(&input, i)?.into() self.project_index(&input, i)?.into()
}; };
self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?; self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
} }
@ -444,7 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
input_len input_len
); );
self.copy_op( self.copy_op(
&self.mplace_index(&input, index)?.into(), &self.project_index(&input, index)?.into(),
dest, dest,
/*allow_transmute*/ false, /*allow_transmute*/ false,
)?; )?;

View file

@ -101,11 +101,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap(); let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
// Initialize fields. // Initialize fields.
self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into()) self.write_immediate(file.to_ref(self), &self.project_field(&location, 0).unwrap().into())
.expect("writing to memory we just allocated cannot fail"); .expect("writing to memory we just allocated cannot fail");
self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into()) self.write_scalar(line, &self.project_field(&location, 1).unwrap().into())
.expect("writing to memory we just allocated cannot fail"); .expect("writing to memory we just allocated cannot fail");
self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into()) self.write_scalar(col, &self.project_field(&location, 2).unwrap().into())
.expect("writing to memory we just allocated cannot fail"); .expect("writing to memory we just allocated cannot fail");
location location

View file

@ -26,9 +26,10 @@ pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackP
pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind}; pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
pub use self::operand::{ImmTy, Immediate, OpTy, Operand}; pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy}; pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
pub use self::projection::Projectable;
pub use self::terminator::FnArg; pub use self::terminator::FnArg;
pub use self::validity::{CtfeValidationMode, RefTracking}; pub use self::validity::{CtfeValidationMode, RefTracking};
pub use self::visitor::{MutValueVisitor, Value, ValueVisitor}; pub use self::visitor::ValueVisitor;
pub(crate) use self::intrinsics::eval_nullary_intrinsic; pub(crate) use self::intrinsics::eval_nullary_intrinsic;
use eval_context::{from_known_layout, mir_assign_valid_types}; use eval_context::{from_known_layout, mir_assign_valid_types};

View file

@ -1,6 +1,8 @@
//! Functions concerning immediate values and operands, and reading from operands. //! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources. //! All high-level functions to read from memory work on operands as sources.
use std::assert_matches::assert_matches;
use either::{Either, Left, Right}; use either::{Either, Left, Right};
use rustc_hir::def::Namespace; use rustc_hir::def::Namespace;
@ -14,7 +16,7 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{ use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId, alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
Provenance, Scalar, Projectable, Provenance, Scalar,
}; };
/// An `Immediate` represents a single immediate self-contained Rust value. /// An `Immediate` represents a single immediate self-contained Rust value.
@ -199,6 +201,20 @@ impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
} }
} }
impl<'tcx, Prov: Provenance> From<&'_ ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(val: &ImmTy<'tcx, Prov>) -> Self {
OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
}
}
impl<'tcx, Prov: Provenance> From<&'_ mut ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
#[inline(always)]
fn from(val: &mut ImmTy<'tcx, Prov>) -> Self {
OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
}
}
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline] #[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self { pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
@ -243,12 +259,8 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
/// Compute the "sub-immediate" that is located within the `base` at the given offset with the /// Compute the "sub-immediate" that is located within the `base` at the given offset with the
/// given layout. /// given layout.
pub(super) fn offset( // Not called `offset` to avoid confusion with the trait method.
&self, fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
offset: Size,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> Self {
// This makes several assumptions about what layouts we will encounter; we match what // This makes several assumptions about what layouts we will encounter; we match what
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`). // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
let inner_val: Immediate<_> = match (**self, self.layout.abi) { let inner_val: Immediate<_> = match (**self, self.layout.abi) {
@ -256,14 +268,28 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
(Immediate::Uninit, _) => Immediate::Uninit, (Immediate::Uninit, _) => Immediate::Uninit,
// the field contains no information, can be left uninit // the field contains no information, can be left uninit
_ if layout.is_zst() => Immediate::Uninit, _ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data
_ if matches!(layout.abi, Abi::Aggregate { .. })
&& matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
{
Immediate::Uninit
}
// the field covers the entire type // the field covers the entire type
_ if layout.size == self.layout.size => { _ if layout.size == self.layout.size => {
assert!(match (self.layout.abi, layout.abi) { assert_eq!(offset.bytes(), 0);
assert!(
match (self.layout.abi, layout.abi) {
(Abi::Scalar(..), Abi::Scalar(..)) => true, (Abi::Scalar(..), Abi::Scalar(..)) => true,
(Abi::ScalarPair(..), Abi::ScalarPair(..)) => true, (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
_ => false, _ => false,
}); },
assert!(offset.bytes() == 0); "cannot project into {} immediate with equally-sized field {}\nouter ABI: {:#?}\nfield ABI: {:#?}",
self.layout.ty,
layout.ty,
self.layout.abi,
layout.abi,
);
**self **self
} }
// extract fields from types with `ScalarPair` ABI // extract fields from types with `ScalarPair` ABI
@ -286,8 +312,42 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
} }
} }
impl<'mir, 'tcx: 'mir, Prov: Provenance> Projectable<'mir, 'tcx, Prov> for ImmTy<'tcx, Prov> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
Ok(MemPlaceMeta::None)
}
fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
Ok(self.offset_(offset, layout, cx))
}
fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
}
}
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> { impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
pub(super) fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> { // Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`.
pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
Ok(if self.layout.is_unsized() { Ok(if self.layout.is_unsized() {
if matches!(self.op, Operand::Immediate(_)) { if matches!(self.op, Operand::Immediate(_)) {
// Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing. // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
@ -300,15 +360,24 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
MemPlaceMeta::None MemPlaceMeta::None
}) })
} }
}
pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { impl<'mir, 'tcx: 'mir, Prov: Provenance + 'static> Projectable<'mir, 'tcx, Prov>
self.meta()?.len(self.layout, cx) for OpTy<'tcx, Prov>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
} }
/// Offset the operand in memory (if possible) and change its metadata. fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
/// &self,
/// This can go wrong very easily if you give the wrong layout for the new place! _ecx: &InterpCx<'mir, 'tcx, M>,
pub(super) fn offset_with_meta( ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
self.meta()
}
fn offset_with_meta(
&self, &self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
@ -320,22 +389,16 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
Right(imm) => { Right(imm) => {
assert!(!meta.has_meta()); // no place to store metadata here assert!(!meta.has_meta()); // no place to store metadata here
// Every part of an uninit is uninit. // Every part of an uninit is uninit.
Ok(imm.offset(offset, layout, cx).into()) Ok(imm.offset(offset, layout, cx)?.into())
} }
} }
} }
/// Offset the operand in memory (if possible). fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
///
/// This can go wrong very easily if you give the wrong layout for the new place!
pub fn offset(
&self, &self,
offset: Size, _ecx: &InterpCx<'mir, 'tcx, M>,
layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
cx: &impl HasDataLayout, Ok(self.clone())
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
} }
} }
@ -525,7 +588,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Every place can be read from, so we can turn them into an operand. /// Every place can be read from, so we can turn them into an operand.
/// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
/// will never actually read from memory. /// will never actually read from memory.
#[inline(always)]
pub fn place_to_op( pub fn place_to_op(
&self, &self,
place: &PlaceTy<'tcx, M::Provenance>, place: &PlaceTy<'tcx, M::Provenance>,
@ -564,7 +626,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?; let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
// Using `try_fold` turned out to be bad for performance, hence the loop. // Using `try_fold` turned out to be bad for performance, hence the loop.
for elem in mir_place.projection.iter() { for elem in mir_place.projection.iter() {
op = self.operand_projection(&op, elem)? op = self.project(&op, elem)?
} }
trace!("eval_place_to_op: got {:?}", *op); trace!("eval_place_to_op: got {:?}", *op);

View file

@ -38,9 +38,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
// do a component-wise write here. This code path is slower than the above because // do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here. // `place_field` will have to `force_allocate` locals here.
let val_field = self.place_field(&dest, 0)?; let val_field = self.project_field(dest, 0)?;
self.write_scalar(val, &val_field)?; self.write_scalar(val, &val_field)?;
let overflowed_field = self.place_field(&dest, 1)?; let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?; self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
} }
Ok(()) Ok(())

View file

@ -18,7 +18,7 @@ use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_V
use super::{ use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand, ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
Pointer, Provenance, Scalar, Pointer, Projectable, Provenance, Scalar,
}; };
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@ -183,7 +183,8 @@ impl<Prov: Provenance> MemPlace<Prov> {
} }
#[inline] #[inline]
fn offset_with_meta<'tcx>( // Not called `offset_with_meta` to avoid confusion with the trait method.
fn offset_with_meta_<'tcx>(
self, self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
@ -195,11 +196,6 @@ impl<Prov: Provenance> MemPlace<Prov> {
); );
Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta }) Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
} }
#[inline]
fn offset<'tcx>(&self, offset: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
self.offset_with_meta(offset, MemPlaceMeta::None, cx)
}
} }
impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> { impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
@ -214,37 +210,6 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align } MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
} }
/// Offset the place in memory and change its metadata.
///
/// This can go wrong very easily if you give the wrong layout for the new place!
#[inline]
pub(crate) fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
align: self.align.restrict_for_offset(offset),
layout,
})
}
/// Offset the place in memory.
///
/// This can go wrong very easily if you give the wrong layout for the new place!
pub fn offset(
&self,
offset: Size,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
}
#[inline] #[inline]
pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self { pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi } MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
@ -262,10 +227,42 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
align: layout.align.abi, align: layout.align.abi,
} }
} }
}
#[inline] impl<'mir, 'tcx: 'mir, Prov: Provenance + 'static> Projectable<'mir, 'tcx, Prov>
pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { for MPlaceTy<'tcx, Prov>
self.mplace.meta.len(self.layout, cx) {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
Ok(self.meta)
}
fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
mplace: self.mplace.offset_with_meta_(offset, meta, cx)?,
align: self.align.restrict_for_offset(offset),
layout,
})
}
fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
} }
} }
@ -293,7 +290,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
} }
} }
impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> { impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
/// A place is either an mplace or some local. /// A place is either an mplace or some local.
#[inline] #[inline]
pub fn as_mplace_or_local( pub fn as_mplace_or_local(
@ -315,11 +312,24 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
) )
}) })
} }
}
/// Offset the place in memory and change its metadata. impl<'mir, 'tcx: 'mir, Prov: Provenance + 'static> Projectable<'mir, 'tcx, Prov>
/// for PlaceTy<'tcx, Prov>
/// This can go wrong very easily if you give the wrong layout for the new place! {
pub(crate) fn offset_with_meta( #[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
ecx.place_meta(self)
}
fn offset_with_meta(
&self, &self,
offset: Size, offset: Size,
meta: MemPlaceMeta<Prov>, meta: MemPlaceMeta<Prov>,
@ -346,17 +356,11 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
}) })
} }
/// Offset the place in memory. fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
///
/// This can go wrong very easily if you give the wrong layout for the new place!
pub fn offset(
&self, &self,
offset: Size, ecx: &InterpCx<'mir, 'tcx, M>,
layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
cx: &impl HasDataLayout, ecx.place_to_op(self)
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
} }
} }
@ -506,7 +510,7 @@ where
let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?; let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
// Using `try_fold` turned out to be bad for performance, hence the loop. // Using `try_fold` turned out to be bad for performance, hence the loop.
for elem in mir_place.projection.iter() { for elem in mir_place.projection.iter() {
place = self.place_projection(&place, elem)? place = self.project(&place, elem)?
} }
trace!("{:?}", self.dump_place(place.place)); trace!("{:?}", self.dump_place(place.place));
@ -849,7 +853,7 @@ where
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
}; };
if let Some(offset) = offset { if let Some(offset) = offset {
whole_local.offset(offset, self)? whole_local.offset_with_meta_(offset, MemPlaceMeta::None, self)?
} else { } else {
// Preserve wide place metadata, do not call `offset`. // Preserve wide place metadata, do not call `offset`.
whole_local whole_local
@ -902,7 +906,7 @@ where
self.write_uninit(&dest)?; self.write_uninit(&dest)?;
let (variant_index, variant_dest, active_field_index) = match *kind { let (variant_index, variant_dest, active_field_index) = match *kind {
mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => { mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
let variant_dest = self.place_downcast(&dest, variant_index)?; let variant_dest = self.project_downcast(dest, variant_index)?;
(variant_index, variant_dest, active_field_index) (variant_index, variant_dest, active_field_index)
} }
_ => (FIRST_VARIANT, dest.clone(), None), _ => (FIRST_VARIANT, dest.clone(), None),
@ -912,7 +916,7 @@ where
} }
for (field_index, operand) in operands.iter_enumerated() { for (field_index, operand) in operands.iter_enumerated() {
let field_index = active_field_index.unwrap_or(field_index); let field_index = active_field_index.unwrap_or(field_index);
let field_dest = self.place_field(&variant_dest, field_index.as_usize())?; let field_dest = self.project_field(&variant_dest, field_index.as_usize())?;
let op = self.eval_operand(operand, Some(field_dest.layout))?; let op = self.eval_operand(operand, Some(field_dest.layout))?;
self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?; self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
} }
@ -952,22 +956,24 @@ where
Ok((mplace, vtable)) Ok((mplace, vtable))
} }
/// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type. /// Turn a `dyn* Trait` type into a value with the actual dynamic type.
/// Aso returns the vtable. /// Also returns the vtable.
pub(super) fn unpack_dyn_star( pub(super) fn unpack_dyn_star<P: Projectable<'mir, 'tcx, M::Provenance>>(
&self, &self,
op: &OpTy<'tcx, M::Provenance>, val: &P,
) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> { ) -> InterpResult<'tcx, (P, Pointer<Option<M::Provenance>>)> {
assert!( assert!(
matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)), matches!(val.layout().ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
"`unpack_dyn_star` only makes sense on `dyn*` types" "`unpack_dyn_star` only makes sense on `dyn*` types"
); );
let data = self.operand_field(&op, 0)?; let data = self.project_field(val, 0)?;
let vtable = self.operand_field(&op, 1)?; let vtable = self.project_field(val, 1)?;
let vtable = self.read_pointer(&vtable)?; let vtable = self.read_pointer(&vtable.to_op(self)?)?;
let (ty, _) = self.get_ptr_vtable(vtable)?; let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?; let layout = self.layout_of(ty)?;
let data = data.offset(Size::ZERO, layout, self)?; // `data` is already the right thing but has the wrong type. So we transmute it, by
// projecting with offset 0.
let data = data.transmute(layout, self)?;
Ok((data, vtable)) Ok((data, vtable))
} }
} }
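
The `unpack_dyn_star` change above leans on the provided methods of the new trait: the data word is already the right value, so it is reinterpreted at its concrete type by a zero-offset projection (`transmute`), and both `offset` and `transmute` are thin default methods over the single required primitive `offset_with_meta`. Here is a minimal, self-contained sketch of that layering; the types are illustrative stand-ins, not the rustc ones, and the sketch is not part of the commit.

// Stand-in types only: shows `offset`/`transmute` as default methods that both
// bottom out in one required primitive, mirroring the trait added in this commit.
#[derive(Clone, Copy, Debug)]
struct Layout {
    size: u64,
}

#[derive(Clone, Copy, Debug)]
enum Meta {
    None,
}

trait Projectable: Sized {
    fn layout(&self) -> Layout;

    // The one required primitive.
    fn offset_with_meta(&self, offset: u64, meta: Meta, layout: Layout) -> Self;

    // Sized projection: no metadata to carry along.
    fn offset(&self, offset: u64, layout: Layout) -> Self {
        self.offset_with_meta(offset, Meta::None, layout)
    }

    // Same bytes, new type: only legal when the sizes match; implemented as a
    // zero-offset projection, the trick `unpack_dyn_star` uses above.
    fn transmute(&self, layout: Layout) -> Self {
        assert_eq!(self.layout().size, layout.size);
        self.offset_with_meta(0, Meta::None, layout)
    }
}

#[derive(Clone, Copy, Debug)]
struct Place {
    addr: u64,
    layout: Layout,
}

impl Projectable for Place {
    fn layout(&self) -> Layout {
        self.layout
    }
    fn offset_with_meta(&self, offset: u64, _meta: Meta, layout: Layout) -> Self {
        Place { addr: self.addr + offset, layout }
    }
}

fn main() {
    // A dyn*-like value: two words, data word first.
    let val = Place { addr: 0x2000, layout: Layout { size: 16 } };
    // Project out the data word, then reinterpret it at its concrete type.
    let data = val.offset(0, Layout { size: 8 });
    let data = data.transmute(Layout { size: 8 });
    println!("{:?}", data); // Place { addr: 0x2000, layout: Layout { size: 8 } }
}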

View file

@ -11,12 +11,67 @@ use rustc_middle::mir;
use rustc_middle::ty; use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty; use rustc_middle::ty::Ty;
use rustc_middle::ty::TyCtxt;
use rustc_target::abi::HasDataLayout;
use rustc_target::abi::Size; use rustc_target::abi::Size;
use rustc_target::abi::{self, VariantIdx}; use rustc_target::abi::{self, VariantIdx};
use super::{ use super::MPlaceTy;
InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy, Provenance, Scalar, use super::{InterpCx, InterpResult, Machine, MemPlaceMeta, OpTy, Provenance, Scalar};
};
/// A thing that we can project into, and that has a layout.
pub trait Projectable<'mir, 'tcx: 'mir, Prov: Provenance>: Sized {
/// Get the layout.
fn layout(&self) -> TyAndLayout<'tcx>;
/// Get the metadata of a wide value.
fn meta<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
fn len<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, u64> {
self.meta(ecx)?.len(self.layout(), ecx)
}
/// Offset the value by the given amount, replacing the layout and metadata.
fn offset_with_meta(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self>;
fn offset(
&self,
offset: Size,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
}
fn transmute(
&self,
layout: TyAndLayout<'tcx>,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> {
assert_eq!(self.layout().size, layout.size);
self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx)
}
/// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
/// reading from this thing.
fn to_op<M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
}
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M> impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
@ -24,24 +79,33 @@ where
Prov: Provenance + 'static, Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>, M: Machine<'mir, 'tcx, Provenance = Prov>,
{ {
//# Field access /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
/// always possible without allocating, so it can take `&self`. Also return the field's layout.
fn project_field( /// This supports both struct and array fields, but not slices!
///
/// This also works for arrays, but then the `usize` index type is restricting.
/// For indexing into arrays, use `mplace_index`.
pub fn project_field<P: Projectable<'mir, 'tcx, M::Provenance>>(
&self, &self,
base_layout: TyAndLayout<'tcx>, base: &P,
base_meta: MemPlaceMeta<M::Provenance>,
field: usize, field: usize,
) -> InterpResult<'tcx, (Size, MemPlaceMeta<M::Provenance>, TyAndLayout<'tcx>)> { ) -> InterpResult<'tcx, P> {
let offset = base_layout.fields.offset(field); // Slices nominally have length 0, so they will panic somewhere in `fields.offset`.
let field_layout = base_layout.field(self, field); debug_assert!(
!matches!(base.layout().ty.kind(), ty::Slice(..)),
"`field` projection called on a slice -- call `index` projection instead"
);
let offset = base.layout().fields.offset(field);
let field_layout = base.layout().field(self, field);
// Offset may need adjustment for unsized fields. // Offset may need adjustment for unsized fields.
let (meta, offset) = if field_layout.is_unsized() { let (meta, offset) = if field_layout.is_unsized() {
if base_layout.is_sized() { if base.layout().is_sized() {
// An unsized field of a sized type? Sure... // An unsized field of a sized type? Sure...
// But const-prop actually feeds us such nonsense MIR! // But const-prop actually feeds us such nonsense MIR!
throw_inval!(ConstPropNonsense); throw_inval!(ConstPropNonsense);
} }
let base_meta = base.meta(self)?;
// Re-use parent metadata to determine dynamic field layout. // Re-use parent metadata to determine dynamic field layout.
// With custom DSTS, this *will* execute user-defined code, but the same // With custom DSTS, this *will* execute user-defined code, but the same
// happens at run-time so that's okay. // happens at run-time so that's okay.
@ -60,189 +124,68 @@ where
(MemPlaceMeta::None, offset) (MemPlaceMeta::None, offset)
}; };
Ok((offset, meta, field_layout))
}
/// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
/// always possible without allocating, so it can take `&self`. Also return the field's layout.
/// This supports both struct and array fields.
///
/// This also works for arrays, but then the `usize` index type is restricting.
/// For indexing into arrays, use `mplace_index`.
pub fn mplace_field(
&self,
base: &MPlaceTy<'tcx, M::Provenance>,
field: usize,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let (offset, meta, field_layout) = self.project_field(base.layout, base.meta, field)?;
// We do not look at `base.layout.align` nor `field_layout.align`, unlike
// codegen -- mostly to see if we can get away with that
base.offset_with_meta(offset, meta, field_layout, self) base.offset_with_meta(offset, meta, field_layout, self)
} }
/// Gets the place of a field inside the place, and also the field's type. /// Downcasting to an enum variant.
pub fn place_field( pub fn project_downcast<P: Projectable<'mir, 'tcx, M::Provenance>>(
&self, &self,
base: &PlaceTy<'tcx, M::Provenance>, base: &P,
field: usize,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let (offset, meta, field_layout) =
self.project_field(base.layout, self.place_meta(base)?, field)?;
base.offset_with_meta(offset, meta, field_layout, self)
}
pub fn operand_field(
&self,
base: &OpTy<'tcx, M::Provenance>,
field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let (offset, meta, field_layout) = self.project_field(base.layout, base.meta()?, field)?;
base.offset_with_meta(offset, meta, field_layout, self)
}
//# Downcasting
pub fn mplace_downcast(
&self,
base: &MPlaceTy<'tcx, M::Provenance>,
variant: VariantIdx, variant: VariantIdx,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> { ) -> InterpResult<'tcx, P> {
assert!(!base.meta(self)?.has_meta());
// Downcasts only change the layout. // Downcasts only change the layout.
// (In particular, no check about whether this is even the active variant -- that's by design, // (In particular, no check about whether this is even the active variant -- that's by design,
// see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.) // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
assert!(!base.meta.has_meta()); // So we just "offset" by 0.
let mut base = *base; let layout = base.layout().for_variant(self, variant);
base.layout = base.layout.for_variant(self, variant); if layout.abi.is_uninhabited() {
Ok(base) // `read_discriminant` should have excluded uninhabited variants... but ConstProp calls
// us on dead code.
throw_inval!(ConstPropNonsense)
} }
// This cannot be `transmute` as variants *can* have a smaller size than the entire enum.
pub fn place_downcast( base.offset(Size::ZERO, layout, self)
&self,
base: &PlaceTy<'tcx, M::Provenance>,
variant: VariantIdx,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
// Downcast just changes the layout
let mut base = base.clone();
base.layout = base.layout.for_variant(self, variant);
Ok(base)
} }
pub fn operand_downcast(
&self,
base: &OpTy<'tcx, M::Provenance>,
variant: VariantIdx,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Downcast just changes the layout
let mut base = base.clone();
base.layout = base.layout.for_variant(self, variant);
Ok(base)
}
//# Slice and array indexing
/// Compute the offset and field layout for accessing the given index. /// Compute the offset and field layout for accessing the given index.
fn project_index( pub fn project_index<P: Projectable<'mir, 'tcx, M::Provenance>>(
&self, &self,
base_layout: TyAndLayout<'tcx>, base: &P,
base_meta: MemPlaceMeta<M::Provenance>,
index: u64, index: u64,
) -> InterpResult<'tcx, (Size, TyAndLayout<'tcx>)> { ) -> InterpResult<'tcx, P> {
// Not using the layout method because we want to compute on u64 // Not using the layout method because we want to compute on u64
match base_layout.fields { let (offset, field_layout) = match base.layout().fields {
abi::FieldsShape::Array { stride, count: _ } => { abi::FieldsShape::Array { stride, count: _ } => {
// `count` is nonsense for slices, use the dynamic length instead. // `count` is nonsense for slices, use the dynamic length instead.
let len = base_meta.len(base_layout, self)?; let len = base.len(self)?;
if index >= len { if index >= len {
// This can only be reached in ConstProp and non-rustc-MIR. // This can only be reached in ConstProp and non-rustc-MIR.
throw_ub!(BoundsCheckFailed { len, index }); throw_ub!(BoundsCheckFailed { len, index });
} }
let offset = stride * index; // `Size` multiplication let offset = stride * index; // `Size` multiplication
// All fields have the same layout. // All fields have the same layout.
let field_layout = base_layout.field(self, 0); let field_layout = base.layout().field(self, 0);
Ok((offset, field_layout)) (offset, field_layout)
} }
_ => span_bug!( _ => span_bug!(
self.cur_span(), self.cur_span(),
"`mplace_index` called on non-array type {:?}", "`mplace_index` called on non-array type {:?}",
base_layout.ty base.layout().ty
), ),
}
}
#[inline(always)]
pub fn operand_index(
&self,
base: &OpTy<'tcx, M::Provenance>,
index: u64,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let (offset, field_layout) = self.project_index(base.layout, base.meta()?, index)?;
base.offset(offset, field_layout, self)
}
/// Index into an array.
pub fn mplace_index(
&self,
base: &MPlaceTy<'tcx, M::Provenance>,
index: u64,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let (offset, field_layout) = self.project_index(base.layout, base.meta, index)?;
base.offset(offset, field_layout, self)
}
pub fn place_index(
&self,
base: &PlaceTy<'tcx, M::Provenance>,
index: u64,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let (offset, field_layout) =
self.project_index(base.layout, self.place_meta(base)?, index)?;
base.offset(offset, field_layout, self)
}
/// Iterates over all fields of an array. Much more efficient than doing the
/// same by repeatedly calling `operand_index`.
pub fn operand_array_fields<'a>(
&self,
base: &'a OpTy<'tcx, Prov>,
) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
}; };
let len = base.len(self)?;
let field_layout = base.layout.field(self, 0); base.offset(offset, field_layout, self)
let dl = &self.tcx.data_layout;
// `Size` multiplication
Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
} }
/// Iterates over all fields of an array. Much more efficient than doing the fn project_constant_index<P: Projectable<'mir, 'tcx, M::Provenance>>(
/// same by repeatedly calling `place_index`.
pub fn place_array_fields<'a>(
&self, &self,
base: &'a PlaceTy<'tcx, Prov>, base: &P,
) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, PlaceTy<'tcx, Prov>>> + 'a> {
let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
span_bug!(self.cur_span(), "place_array_fields: expected an array layout");
};
let len = self.place_meta(base)?.len(base.layout, self)?;
let field_layout = base.layout.field(self, 0);
let dl = &self.tcx.data_layout;
// `Size` multiplication
Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
}
//# ConstantIndex support
fn project_constant_index(
&self,
base_layout: TyAndLayout<'tcx>,
base_meta: MemPlaceMeta<M::Provenance>,
offset: u64, offset: u64,
min_length: u64, min_length: u64,
from_end: bool, from_end: bool,
) -> InterpResult<'tcx, (Size, TyAndLayout<'tcx>)> { ) -> InterpResult<'tcx, P> {
let n = base_meta.len(base_layout, self)?; let n = base.len(self)?;
if n < min_length { if n < min_length {
// This can only be reached in ConstProp and non-rustc-MIR. // This can only be reached in ConstProp and non-rustc-MIR.
throw_ub!(BoundsCheckFailed { len: min_length, index: n }); throw_ub!(BoundsCheckFailed { len: min_length, index: n });
@ -256,49 +199,39 @@ where
offset offset
}; };
self.project_index(base_layout, base_meta, index) self.project_index(base, index)
} }
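The `from_end` handling above is easy to misread, so here is the same resolution rule as a standalone sketch (hypothetical helper, simplified types; the interpreter throws `BoundsCheckFailed` UB where this returns an error). A `ConstantIndex` means either the fixed position `offset` from the start, or `len - offset` counted from the end, with `min_length` the minimum length the MIR was built against:

fn resolve_constant_index(len: u64, offset: u64, min_length: u64, from_end: bool) -> Result<u64, String> {
    if len < min_length {
        // Only reachable for const-prop / hand-written MIR, as noted above.
        return Err(format!("bounds check failed: len {len} < min_length {min_length}"));
    }
    Ok(if from_end {
        // MIR building guarantees offset <= min_length <= len, so this cannot underflow.
        len - offset
    } else {
        offset
    })
}

fn main() {
    assert_eq!(resolve_constant_index(10, 2, 3, false).unwrap(), 2); // x[2]
    assert_eq!(resolve_constant_index(10, 2, 3, true).unwrap(), 8);  // x[len - 2]
    assert!(resolve_constant_index(2, 2, 3, true).is_err());
}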
fn operand_constant_index( /// Iterates over all fields of an array. Much more efficient than doing the
/// same by repeatedly calling `operand_index`.
pub fn project_array_fields<'a, P: Projectable<'mir, 'tcx, M::Provenance>>(
&self, &self,
base: &OpTy<'tcx, M::Provenance>, base: &'a P,
offset: u64, ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a>
min_length: u64, where
from_end: bool, 'tcx: 'a,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> { {
let (offset, layout) = let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
self.project_constant_index(base.layout, base.meta()?, offset, min_length, from_end)?; span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
base.offset(offset, layout, self) };
let len = base.len(self)?;
let field_layout = base.layout().field(self, 0);
let tcx: TyCtxt<'tcx> = *self.tcx;
// `Size` multiplication
Ok((0..len).map(move |i| {
base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
}))
} }
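The iterator above avoids redoing the projection bookkeeping per element: it fixes the element layout once and then only adds `stride * i`. A toy sketch of the same shape (hypothetical names, no provenance or metadata handling):

// Lazily enumerate (index, byte_offset) pairs for an array of `len` elements
// spaced `stride` bytes apart, like the iterator returned by `project_array_fields`.
fn array_field_offsets(stride: u64, len: u64) -> impl Iterator<Item = (u64, u64)> {
    (0..len).map(move |i| (i, stride * i))
}

fn main() {
    let offsets: Vec<_> = array_field_offsets(4, 3).collect();
    assert_eq!(offsets, vec![(0, 0), (1, 4), (2, 8)]);
}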
fn place_constant_index( /// Subslicing
fn project_subslice<P: Projectable<'mir, 'tcx, M::Provenance>>(
&self, &self,
base: &PlaceTy<'tcx, M::Provenance>, base: &P,
offset: u64,
min_length: u64,
from_end: bool,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let (offset, layout) = self.project_constant_index(
base.layout,
self.place_meta(base)?,
offset,
min_length,
from_end,
)?;
base.offset(offset, layout, self)
}
//# Subslicing
fn project_subslice(
&self,
base_layout: TyAndLayout<'tcx>,
base_meta: MemPlaceMeta<M::Provenance>,
from: u64, from: u64,
to: u64, to: u64,
from_end: bool, from_end: bool,
) -> InterpResult<'tcx, (Size, MemPlaceMeta<M::Provenance>, TyAndLayout<'tcx>)> { ) -> InterpResult<'tcx, P> {
let len = base_meta.len(base_layout, self)?; // also asserts that we have a type where this makes sense let len = base.len(self)?; // also asserts that we have a type where this makes sense
let actual_to = if from_end { let actual_to = if from_end {
if from.checked_add(to).map_or(true, |to| to > len) { if from.checked_add(to).map_or(true, |to| to > len) {
// This can only be reached in ConstProp and non-rustc-MIR. // This can only be reached in ConstProp and non-rustc-MIR.
@ -311,16 +244,20 @@ where
// Not using layout method because that works with usize, and does not work with slices // Not using layout method because that works with usize, and does not work with slices
// (that have count 0 in their layout). // (that have count 0 in their layout).
let from_offset = match base_layout.fields { let from_offset = match base.layout().fields {
abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
_ => { _ => {
span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base_layout) span_bug!(
self.cur_span(),
"unexpected layout of index access: {:#?}",
base.layout()
)
} }
}; };
// Compute meta and new layout // Compute meta and new layout
let inner_len = actual_to.checked_sub(from).unwrap(); let inner_len = actual_to.checked_sub(from).unwrap();
let (meta, ty) = match base_layout.ty.kind() { let (meta, ty) = match base.layout().ty.kind() {
// It is not nice to match on the type, but that seems to be the only way to // It is not nice to match on the type, but that seems to be the only way to
// implement this. // implement this.
ty::Array(inner, _) => { ty::Array(inner, _) => {
@ -328,98 +265,45 @@ where
} }
ty::Slice(..) => { ty::Slice(..) => {
let len = Scalar::from_target_usize(inner_len, self); let len = Scalar::from_target_usize(inner_len, self);
(MemPlaceMeta::Meta(len), base_layout.ty) (MemPlaceMeta::Meta(len), base.layout().ty)
} }
_ => { _ => {
span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base_layout.ty) span_bug!(
self.cur_span(),
"cannot subslice non-array type: `{:?}`",
base.layout().ty
)
} }
}; };
let layout = self.layout_of(ty)?; let layout = self.layout_of(ty)?;
Ok((from_offset, meta, layout))
}
fn operand_subslice(
&self,
base: &OpTy<'tcx, M::Provenance>,
from: u64,
to: u64,
from_end: bool,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let (from_offset, meta, layout) =
self.project_subslice(base.layout, base.meta()?, from, to, from_end)?;
base.offset_with_meta(from_offset, meta, layout, self) base.offset_with_meta(from_offset, meta, layout, self)
} }
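Subslicing combines both pieces: the element range is `[from, to)`, or `[from, len - to)` when `from_end` is set, the byte offset of the result is `stride * from`, and the new element count becomes the slice's metadata. A simplified standalone sketch of that resolution (toy stride/length model only; the interpreter additionally computes the new layout and place metadata):

fn resolve_subslice(
    stride: u64,
    len: u64,
    from: u64,
    to: u64,
    from_end: bool,
) -> Result<(u64, u64), String> {
    let actual_to = if from_end {
        if from.checked_add(to).map_or(true, |sum| sum > len) {
            return Err(format!("bounds check failed: len {len} is too short for [{from}..len-{to}]"));
        }
        len - to
    } else {
        to
    };
    let inner_len = actual_to.checked_sub(from).ok_or("subslice range is inverted")?;
    Ok((stride * from, inner_len))
}

fn main() {
    // &x[2..] on a slice of 5 elements, 8 bytes each: starts at byte 16, keeps 3 elements.
    assert_eq!(resolve_subslice(8, 5, 2, 0, true).unwrap(), (16, 3));
    // &x[1..4]
    assert_eq!(resolve_subslice(8, 5, 1, 4, false).unwrap(), (8, 3));
}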
pub fn place_subslice( /// Applying a general projection
&self,
base: &PlaceTy<'tcx, M::Provenance>,
from: u64,
to: u64,
from_end: bool,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
let (from_offset, meta, layout) =
self.project_subslice(base.layout, self.place_meta(base)?, from, to, from_end)?;
base.offset_with_meta(from_offset, meta, layout, self)
}
//# Applying a general projection
/// Projects into a place.
#[instrument(skip(self), level = "trace")] #[instrument(skip(self), level = "trace")]
pub fn place_projection( pub fn project<P>(&self, base: &P, proj_elem: mir::PlaceElem<'tcx>) -> InterpResult<'tcx, P>
&self, where
base: &PlaceTy<'tcx, M::Provenance>, P: Projectable<'mir, 'tcx, M::Provenance>
proj_elem: mir::PlaceElem<'tcx>, + From<MPlaceTy<'tcx, M::Provenance>>
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> { + std::fmt::Debug,
{
use rustc_middle::mir::ProjectionElem::*; use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem { Ok(match proj_elem {
OpaqueCast(ty) => { OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?,
let mut place = base.clone(); Field(field, _) => self.project_field(base, field.index())?,
place.layout = self.layout_of(ty)?; Downcast(_, variant) => self.project_downcast(base, variant)?,
place Deref => self.deref_operand(&base.to_op(self)?)?.into(),
}
Field(field, _) => self.place_field(base, field.index())?,
Downcast(_, variant) => self.place_downcast(base, variant)?,
Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
Index(local) => { Index(local) => {
let layout = self.layout_of(self.tcx.types.usize)?; let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.local_to_op(self.frame(), local, Some(layout))?; let n = self.local_to_op(self.frame(), local, Some(layout))?;
let n = self.read_target_usize(&n)?; let n = self.read_target_usize(&n)?;
self.place_index(base, n)? self.project_index(base, n)?
} }
ConstantIndex { offset, min_length, from_end } => { ConstantIndex { offset, min_length, from_end } => {
self.place_constant_index(base, offset, min_length, from_end)? self.project_constant_index(base, offset, min_length, from_end)?
} }
Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?, Subslice { from, to, from_end } => self.project_subslice(base, from, to, from_end)?,
})
}
#[instrument(skip(self), level = "trace")]
pub fn operand_projection(
&self,
base: &OpTy<'tcx, M::Provenance>,
proj_elem: mir::PlaceElem<'tcx>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
OpaqueCast(ty) => {
let mut op = base.clone();
op.layout = self.layout_of(ty)?;
op
}
Field(field, _) => self.operand_field(base, field.index())?,
Downcast(_, variant) => self.operand_downcast(base, variant)?,
Deref => self.deref_operand(base)?.into(),
Index(local) => {
let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.local_to_op(self.frame(), local, Some(layout))?;
let n = self.read_target_usize(&n)?;
self.operand_index(base, n)?
}
ConstantIndex { offset, min_length, from_end } => {
self.operand_constant_index(base, offset, min_length, from_end)?
}
Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
}) })
} }
} }
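The payoff of the `Projectable` bound is visible here: one `project` function now serves operands, memory places, and places alike, where the old code kept near-identical `operand_*` and `place_*` twins. Below is a minimal standalone model of that pattern, with toy types and a toy layout rather than the real trait, but the same shape: the trait exposes the layout and an offset constructor, and the projection logic is written once, generically.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Layout {
    size: u64,
    field_offsets: &'static [u64],
}

trait Projectable: Sized {
    fn layout(&self) -> Layout;
    /// Produce a new value of the same kind, re-addressed at `offset` with the given layout.
    fn offset(&self, offset: u64, layout: Layout) -> Self;
}

#[derive(Debug)]
struct OpView { addr: u64, layout: Layout }      // think: a read-only operand
#[derive(Debug)]
struct PlaceSlot { addr: u64, layout: Layout }   // think: a writable place

impl Projectable for OpView {
    fn layout(&self) -> Layout { self.layout }
    fn offset(&self, offset: u64, layout: Layout) -> Self {
        OpView { addr: self.addr + offset, layout }
    }
}
impl Projectable for PlaceSlot {
    fn layout(&self) -> Layout { self.layout }
    fn offset(&self, offset: u64, layout: Layout) -> Self {
        PlaceSlot { addr: self.addr + offset, layout }
    }
}

/// Written once, usable for both kinds of value.
fn project_field<P: Projectable>(base: &P, field: usize, field_layout: Layout) -> P {
    let offset = base.layout().field_offsets[field];
    base.offset(offset, field_layout)
}

fn main() {
    let pair = Layout { size: 16, field_offsets: &[0, 8] };
    let u64_layout = Layout { size: 8, field_offsets: &[] };
    let op = OpView { addr: 0x1000, layout: pair };
    let place = PlaceSlot { addr: 0x2000, layout: pair };
    assert_eq!(project_field(&op, 1, u64_layout).addr, 0x1008);
    assert_eq!(project_field(&place, 1, u64_layout).addr, 0x2008);
}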


@ -8,7 +8,7 @@ use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar}; use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::layout::LayoutOf;
use super::{ImmTy, InterpCx, Machine}; use super::{ImmTy, InterpCx, Machine, Projectable};
use crate::util; use crate::util;
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@ -197,7 +197,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.get_place_alloc_mut(&dest)?; self.get_place_alloc_mut(&dest)?;
} else { } else {
// Write the src to the first element. // Write the src to the first element.
let first = self.mplace_field(&dest, 0)?; let first = self.project_index(&dest, 0)?;
self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?; self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
// This is performance-sensitive code for big static/const arrays! So we // This is performance-sensitive code for big static/const arrays! So we


@ -65,8 +65,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
field: usize, field: usize,
) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> { ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
Ok(match arg { Ok(match arg {
FnArg::Copy(op) => FnArg::Copy(self.operand_field(op, field)?), FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
FnArg::InPlace(place) => FnArg::InPlace(self.place_field(place, field)?), FnArg::InPlace(place) => FnArg::InPlace(self.project_field(place, field)?),
}) })
} }
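Here both argument flavours go through the same generic `project_field`, and the surrounding match only preserves which flavour it was. A toy sketch of that shape, with plain addresses standing in for interpreter operands and places:

#[derive(Debug, PartialEq, Clone, Copy)]
enum FnArg {
    Copy(u64),     // think: a by-value operand
    InPlace(u64),  // think: an in-place argument slot
}

// Project "the same field" regardless of which variant the argument is in.
fn arg_field(arg: FnArg, field_offset: u64) -> FnArg {
    match arg {
        FnArg::Copy(addr) => FnArg::Copy(addr + field_offset),
        FnArg::InPlace(addr) => FnArg::InPlace(addr + field_offset),
    }
}

fn main() {
    assert_eq!(arg_field(FnArg::Copy(0x100), 8), FnArg::Copy(0x108));
    assert_eq!(arg_field(FnArg::InPlace(0x200), 8), FnArg::InPlace(0x208));
}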
@ -382,8 +382,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// This all has to be in memory, there are no immediate unsized values. // This all has to be in memory, there are no immediate unsized values.
let src = caller_arg_copy.assert_mem_place(); let src = caller_arg_copy.assert_mem_place();
// The destination cannot be one of these "spread args". // The destination cannot be one of these "spread args".
let (dest_frame, dest_local, dest_offset) = let (dest_frame, dest_local, dest_offset) = callee_arg
callee_arg.as_mplace_or_local().right().expect("calee fn arguments must be locals"); .as_mplace_or_local()
.right()
.expect("callee fn arguments must be locals");
// We are just initializing things, so there can't be anything here yet. // We are just initializing things, so there can't be anything here yet.
assert!(matches!( assert!(matches!(
*self.local_to_op(&self.stack()[dest_frame], dest_local, None)?, *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
@ -597,7 +599,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if Some(local) == body.spread_arg { if Some(local) == body.spread_arg {
// Must be a tuple // Must be a tuple
for i in 0..dest.layout.fields.count() { for i in 0..dest.layout.fields.count() {
let dest = self.place_field(&dest, i)?; let dest = self.project_field(&dest, i)?;
let callee_abi = callee_args_abis.next().unwrap(); let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(&mut caller_args, callee_abi, &dest)?; self.pass_argument(&mut caller_args, callee_abi, &dest)?;
} }
@ -679,7 +681,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Not there yet, search for the only non-ZST field. // Not there yet, search for the only non-ZST field.
let mut non_zst_field = None; let mut non_zst_field = None;
for i in 0..receiver.layout.fields.count() { for i in 0..receiver.layout.fields.count() {
let field = self.operand_field(&receiver, i)?; let field = self.project_field(&receiver, i)?;
let zst = let zst =
field.layout.is_zst() && field.layout.align.abi.bytes() == 1; field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
if !zst { if !zst {
@ -705,12 +707,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) = let (vptr, dyn_ty, adjusted_receiver) = if let ty::Dynamic(data, _, ty::DynStar) =
receiver_place.layout.ty.kind() receiver_place.layout.ty.kind()
{ {
let (recv, vptr) = self.unpack_dyn_star(&receiver_place.into())?; let (recv, vptr) = self.unpack_dyn_star(&receiver_place)?;
let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?; let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
if dyn_trait != data.principal() { if dyn_trait != data.principal() {
throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch); throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
} }
let recv = recv.assert_mem_place(); // we passed an MPlaceTy to `unpack_dyn_star` so we definitely still have one
(vptr, dyn_ty, recv.ptr) (vptr, dyn_ty, recv.ptr)
} else { } else {
@ -838,7 +839,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
ty::Dynamic(_, _, ty::DynStar) => { ty::Dynamic(_, _, ty::DynStar) => {
// Dropping a `dyn*`. Need to find actual drop fn. // Dropping a `dyn*`. Need to find actual drop fn.
self.unpack_dyn_star(&place.into())?.0.assert_mem_place() self.unpack_dyn_star(&place)?.0
} }
_ => { _ => {
debug_assert_eq!( debug_assert_eq!(


@ -29,7 +29,7 @@ use std::hash::Hash;
use super::UndefinedBehaviorInfo::*; use super::UndefinedBehaviorInfo::*;
use super::{ use super::{
AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, AllocId, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy,
Machine, MemPlaceMeta, OpTy, Pointer, Scalar, ValueVisitor, Machine, MemPlaceMeta, OpTy, Pointer, Projectable, Scalar, ValueVisitor,
}; };
macro_rules! throw_validation_failure { macro_rules! throw_validation_failure {
@ -462,6 +462,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
/// Check if this is a value of primitive type, and if yes check the validity of the value /// Check if this is a value of primitive type, and if yes check the validity of the value
/// at that type. Return `true` if the type is indeed primitive. /// at that type. Return `true` if the type is indeed primitive.
///
/// Note that not all of these have `FieldsShape::Primitive`, e.g. wide references.
fn try_visit_primitive( fn try_visit_primitive(
&mut self, &mut self,
value: &OpTy<'tcx, M::Provenance>, value: &OpTy<'tcx, M::Provenance>,
@ -655,15 +657,14 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx, VariantIdx> { ) -> InterpResult<'tcx, VariantIdx> {
self.with_elem(PathElem::EnumTag, move |this| { self.with_elem(PathElem::EnumTag, move |this| {
Ok(try_validation!( Ok(try_validation!(
this.ecx.read_discriminant(op), this.ecx.read_discriminant(op).map(|(_, idx)| idx),
this.path, this.path,
InvalidTag(val) => InvalidEnumTag { InvalidTag(val) => InvalidEnumTag {
value: format!("{val:x}"), value: format!("{val:x}"),
}, },
UninhabitedEnumVariantRead(_) => UninhabitedEnumTag,
InvalidUninitBytes(None) => UninitEnumTag, InvalidUninitBytes(None) => UninitEnumTag,
) ))
.1)
}) })
} }
@ -733,60 +734,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
} }
} }
// Recursively walk the value at its type. // Recursively walk the value at its type. Apply optimizations for some large types.
self.walk_value(op)?;
// *After* all of this, check the ABI. We need to check the ABI to handle
// types like `NonNull` where the `Scalar` info is more restrictive than what
// the fields say (`rustc_layout_scalar_valid_range_start`).
// But in most cases, this will just propagate what the fields say,
// and then we want the error to point at the field -- so, first recurse,
// then check ABI.
//
// FIXME: We could avoid some redundant checks here. For newtypes wrapping
// scalars, we do the same check on every "level" (e.g., first we check
// MyNewtype and then the scalar in there).
match op.layout.abi {
Abi::Uninhabited => {
let ty = op.layout.ty;
throw_validation_failure!(self.path, UninhabitedVal { ty });
}
Abi::Scalar(scalar_layout) => {
if !scalar_layout.is_uninit_valid() {
// There is something to check here.
let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
self.visit_scalar(scalar, scalar_layout)?;
}
}
Abi::ScalarPair(a_layout, b_layout) => {
// We can only proceed if *both* scalars need to be initialized.
// FIXME: find a way to also check ScalarPair when one side can be uninit but
// the other must be init.
if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
let (a, b) =
self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
self.visit_scalar(a, a_layout)?;
self.visit_scalar(b, b_layout)?;
}
}
Abi::Vector { .. } => {
// No checks here, we assume layout computation gets this right.
// (This is harder to check since Miri does not represent these as `Immediate`. We
// also cannot use field projections since this might be a newtype around a vector.)
}
Abi::Aggregate { .. } => {
// Nothing to do.
}
}
Ok(())
}
fn visit_aggregate(
&mut self,
op: &OpTy<'tcx, M::Provenance>,
fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
match op.layout.ty.kind() { match op.layout.ty.kind() {
ty::Str => { ty::Str => {
let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
@ -874,12 +822,58 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// ZST type, so either validation fails for all elements or none. // ZST type, so either validation fails for all elements or none.
ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => { ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
// Validate just the first element (if any). // Validate just the first element (if any).
self.walk_aggregate(op, fields.take(1))? if op.len(self.ecx)? > 0 {
self.visit_field(op, 0, &self.ecx.project_index(op, 0)?)?;
}
} }
_ => { _ => {
self.walk_aggregate(op, fields)? // default handler self.walk_value(op)?; // default handler
} }
} }
// *After* all of this, check the ABI. We need to check the ABI to handle
// types like `NonNull` where the `Scalar` info is more restrictive than what
// the fields say (`rustc_layout_scalar_valid_range_start`).
// But in most cases, this will just propagate what the fields say,
// and then we want the error to point at the field -- so, first recurse,
// then check ABI.
//
// FIXME: We could avoid some redundant checks here. For newtypes wrapping
// scalars, we do the same check on every "level" (e.g., first we check
// MyNewtype and then the scalar in there).
match op.layout.abi {
Abi::Uninhabited => {
let ty = op.layout.ty;
throw_validation_failure!(self.path, UninhabitedVal { ty });
}
Abi::Scalar(scalar_layout) => {
if !scalar_layout.is_uninit_valid() {
// There is something to check here.
let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
self.visit_scalar(scalar, scalar_layout)?;
}
}
Abi::ScalarPair(a_layout, b_layout) => {
// We can only proceed if *both* scalars need to be initialized.
// FIXME: find a way to also check ScalarPair when one side can be uninit but
// the other must be init.
if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
let (a, b) =
self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
self.visit_scalar(a, a_layout)?;
self.visit_scalar(b, b_layout)?;
}
}
Abi::Vector { .. } => {
// No checks here, we assume layout computation gets this right.
// (This is harder to check since Miri does not represent these as `Immediate`. We
// also cannot use field projections since this might be a newtype around a vector.)
}
Abi::Aggregate { .. } => {
// Nothing to do.
}
}
Ok(()) Ok(())
} }
} }
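One subtlety worth spelling out: the rearranged `visit_value` recurses into fields first and only afterwards checks the outer ABI/scalar range, so a violated `rustc_layout_scalar_valid_range` on a newtype is reported against the innermost field rather than the wrapper. A toy sketch of that ordering (a simplified tree of values with optional valid ranges, not the real validator):

struct Node {
    name: &'static str,
    valid: Option<std::ops::RangeInclusive<u64>>,
    value: u64,
    fields: Vec<Node>,
}

fn validate(node: &Node, path: &mut Vec<&'static str>) -> Result<(), String> {
    path.push(node.name);
    // First recurse, so errors point at the deepest field that carries the restriction.
    for field in &node.fields {
        validate(field, path)?;
    }
    // *After* the fields, check this level's own constraint.
    if let Some(range) = &node.valid {
        if !range.contains(&node.value) {
            return Err(format!("{}: {} not in {:?}", path.join("."), node.value, range));
        }
    }
    path.pop();
    Ok(())
}

fn main() {
    // A NonZero-style newtype whose inner field carries the same restriction.
    let inner = Node { name: "inner", valid: Some(1..=u64::MAX), value: 0, fields: vec![] };
    let bad = Node { name: "MyNewtype", valid: Some(1..=u64::MAX), value: 0, fields: vec![inner] };
    let err = validate(&bad, &mut Vec::new()).unwrap_err();
    assert!(err.contains("MyNewtype.inner")); // the error names the innermost field
}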


@ -1,378 +1,60 @@
//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound //! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
//! types until we arrive at the leaves, with custom handling for primitive types. //! types until we arrive at the leaves, with custom handling for primitive types.
use rustc_index::IndexVec;
use rustc_middle::mir::interpret::InterpResult; use rustc_middle::mir::interpret::InterpResult;
use rustc_middle::ty; use rustc_middle::ty;
use rustc_middle::ty::layout::TyAndLayout; use rustc_target::abi::FieldIdx;
use rustc_target::abi::{FieldsShape, VariantIdx, Variants}; use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
use std::num::NonZeroUsize; use std::num::NonZeroUsize;
use super::{InterpCx, MPlaceTy, Machine, OpTy, PlaceTy}; use super::{InterpCx, MPlaceTy, Machine, Projectable};
/// A thing that we can project into, and that has a layout. /// How to traverse a value and what to do when we are at the leaves.
/// This wouldn't have to depend on `Machine` but with the current type inference, pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
/// that's just more convenient to work with (avoids repeating all the `Machine` bounds). type V: Projectable<'mir, 'tcx, M::Provenance>
pub trait Value<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized { + From<MPlaceTy<'tcx, M::Provenance>>
/// Gets this value's layout. + std::fmt::Debug;
fn layout(&self) -> TyAndLayout<'tcx>;
/// Makes this into an `OpTy`, in a cheap way that is good for reading.
fn to_op_for_read(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
/// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
fn to_op_for_proj(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
self.to_op_for_read(ecx)
}
/// Creates this from an `OpTy`.
///
/// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
/// Projects to the given enum variant.
fn project_downcast(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self>;
/// Projects to the n-th field.
fn project_field(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self>;
}
/// A thing that we can project into given *mutable* access to `ecx`, and that has a layout.
/// This wouldn't have to depend on `Machine` but with the current type inference,
/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
pub trait ValueMut<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
/// Gets this value's layout.
fn layout(&self) -> TyAndLayout<'tcx>;
/// Makes this into an `OpTy`, in a cheap way that is good for reading.
fn to_op_for_read(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
/// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
fn to_op_for_proj(
&self,
ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
/// Creates this from an `OpTy`.
///
/// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
/// Projects to the given enum variant.
fn project_downcast(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self>;
/// Projects to the n-th field.
fn project_field(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self>;
}
// We cannot have a general impl which shows that Value implies ValueMut. (When we do, it says we
// cannot `impl ValueMut for PlaceTy` because some downstream crate could `impl Value for PlaceTy`.)
// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
// double-impl, that would barely make the code shorter, if at all.)
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
#[inline(always)]
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
op.clone()
}
#[inline(always)]
fn project_downcast(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self> {
ecx.operand_downcast(self, variant)
}
#[inline(always)]
fn project_field(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self> {
ecx.operand_field(self, field)
}
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
for OpTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
#[inline(always)]
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
}
#[inline(always)]
fn to_op_for_proj(
&self,
_ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
op.clone()
}
#[inline(always)]
fn project_downcast(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self> {
ecx.operand_downcast(self, variant)
}
#[inline(always)]
fn project_field(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self> {
ecx.operand_field(self, field)
}
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
for MPlaceTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
#[inline(always)]
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
// assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
op.assert_mem_place()
}
#[inline(always)]
fn project_downcast(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self> {
ecx.mplace_downcast(self, variant)
}
#[inline(always)]
fn project_field(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self> {
ecx.mplace_field(self, field)
}
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
for MPlaceTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
#[inline(always)]
fn to_op_for_read(
&self,
_ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
}
#[inline(always)]
fn to_op_for_proj(
&self,
_ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.into())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
// assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
op.assert_mem_place()
}
#[inline(always)]
fn project_downcast(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self> {
ecx.mplace_downcast(self, variant)
}
#[inline(always)]
fn project_field(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self> {
ecx.mplace_field(self, field)
}
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
for PlaceTy<'tcx, M::Provenance>
{
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
#[inline(always)]
fn to_op_for_read(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// No need for `force_allocation` since we are just going to read from this.
ecx.place_to_op(self)
}
#[inline(always)]
fn to_op_for_proj(
&self,
ecx: &mut InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// We `force_allocation` here so that `from_op` below can work.
Ok(ecx.force_allocation(self)?.into())
}
#[inline(always)]
fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
// assert is justified because our `to_op` only ever produces `Indirect` operands.
op.assert_mem_place().into()
}
#[inline(always)]
fn project_downcast(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
variant: VariantIdx,
) -> InterpResult<'tcx, Self> {
ecx.place_downcast(self, variant)
}
#[inline(always)]
fn project_field(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
field: usize,
) -> InterpResult<'tcx, Self> {
ecx.place_field(self, field)
}
}
macro_rules! make_value_visitor {
($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
/// How to traverse a value and what to do when we are at the leaves.
pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
type V: $value_trait<'mir, 'tcx, M>;
/// The visitor must have an `InterpCx` in it. /// The visitor must have an `InterpCx` in it.
fn ecx(&$($mutability)? self) fn ecx(&self) -> &InterpCx<'mir, 'tcx, M>;
-> &$($mutability)? InterpCx<'mir, 'tcx, M>;
/// `read_discriminant` can be hooked for better error messages. /// `read_discriminant` can be hooked for better error messages.
#[inline(always)] #[inline(always)]
fn read_discriminant( fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
&mut self, Ok(self.ecx().read_discriminant(&v.to_op(self.ecx())?)?.1)
op: &OpTy<'tcx, M::Provenance>, }
) -> InterpResult<'tcx, VariantIdx> {
Ok(self.ecx().read_discriminant(op)?.1) /// This function provides the chance to reorder the order in which fields are visited for
/// `FieldsShape::Aggregate`: The order of fields will be
/// `(0..num_fields).map(aggregate_field_order)`.
///
/// The default means we iterate in source declaration order; alternatively this can do an inverse
/// lookup in `memory_index` to use memory field order instead.
#[inline(always)]
fn aggregate_field_order(_memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
idx
} }
// Recursive actions, ready to be overloaded. // Recursive actions, ready to be overloaded.
/// Visits the given value, dispatching as appropriate to more specialized visitors. /// Visits the given value, dispatching as appropriate to more specialized visitors.
#[inline(always)] #[inline(always)]
fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
{
self.walk_value(v) self.walk_value(v)
} }
/// Visits the given value as a union. No automatic recursion can happen here. /// Visits the given value as a union. No automatic recursion can happen here.
#[inline(always)] #[inline(always)]
fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx> {
{
Ok(()) Ok(())
} }
/// Visits the given value as the pointer of a `Box`. There is nothing to recurse into. /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
/// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
/// pointee type is the actual `T`. /// pointee type is the actual `T`.
#[inline(always)] #[inline(always)]
fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx> fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx> {
{
Ok(()) Ok(())
} }
/// Visits this value as an aggregate, you are getting an iterator yielding
/// all the fields (still in an `InterpResult`, you have to do error handling yourself).
/// Recurses into the fields.
#[inline(always)]
fn visit_aggregate(
&mut self,
v: &Self::V,
fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
self.walk_aggregate(v, fields)
}
/// Called each time we recurse down to a field of a "product-like" aggregate /// Called each time we recurse down to a field of a "product-like" aggregate
/// (structs, tuples, arrays and the like, but not enums), passing in old (outer) /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
@ -401,20 +83,7 @@ macro_rules! make_value_visitor {
self.visit_value(new_val) self.visit_value(new_val)
} }
// Default recursors. Not meant to be overloaded. fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
fn walk_aggregate(
&mut self,
v: &Self::V,
fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
) -> InterpResult<'tcx> {
// Now iterate over it.
for (idx, field_val) in fields.enumerate() {
self.visit_field(v, idx, &field_val?)?;
}
Ok(())
}
fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
{
let ty = v.layout().ty; let ty = v.layout().ty;
trace!("walk_value: type: {ty}"); trace!("walk_value: type: {ty}");
@ -425,20 +94,19 @@ macro_rules! make_value_visitor {
// Dyn types. This is unsized, and the actual dynamic type of the data is given by the // Dyn types. This is unsized, and the actual dynamic type of the data is given by the
// vtable stored in the place metadata. // vtable stored in the place metadata.
// unsized values are never immediate, so we can assert_mem_place // unsized values are never immediate, so we can assert_mem_place
let op = v.to_op_for_read(self.ecx())?; let op = v.to_op(self.ecx())?;
let dest = op.assert_mem_place(); let dest = op.assert_mem_place();
let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0; let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?.0;
trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout); trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
// recurse with the inner type // recurse with the inner type
return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into())); return self.visit_field(&v, 0, &inner_mplace.into());
}, }
ty::Dynamic(_, _, ty::DynStar) => { ty::Dynamic(_, _, ty::DynStar) => {
// DynStar types. Very different from a dyn type (but strangely part of the // DynStar types. Very different from a dyn type (but strangely part of the
// same variant in `TyKind`): These are pairs where the 2nd component is the // same variant in `TyKind`): These are pairs where the 2nd component is the
// vtable, and the first component is the data (which must be ptr-sized). // vtable, and the first component is the data (which must be ptr-sized).
let op = v.to_op_for_proj(self.ecx())?; let data = self.ecx().unpack_dyn_star(v)?.0;
let data = self.ecx().unpack_dyn_star(&op)?.0; return self.visit_field(&v, 0, &data);
return self.visit_field(&v, 0, &$value_trait::from_op(&data));
} }
// Slices do not need special handling here: they have `Array` field // Slices do not need special handling here: they have `Array` field
// placement with length 0, so we enter the `Array` case below which // placement with length 0, so we enter the `Array` case below which
@ -453,7 +121,7 @@ macro_rules! make_value_visitor {
// `Immediate`. Yeah, it is quite terrible. But many visitors want to do // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
// something with "all boxed pointers", so we handle this mess for them. // something with "all boxed pointers", so we handle this mess for them.
// //
// When we hit a `Box`, we do not do the usual `visit_aggregate`; instead, // When we hit a `Box`, we do not do the usual field recursion; instead,
// we (a) call `visit_box` on the pointer value, and (b) recurse on the // we (a) call `visit_box` on the pointer value, and (b) recurse on the
// allocator field. We also assert tons of things to ensure we do not miss // allocator field. We also assert tons of things to ensure we do not miss
// any other fields. // any other fields.
@ -461,13 +129,13 @@ macro_rules! make_value_visitor {
// `Box` has two fields: the pointer we care about, and the allocator. // `Box` has two fields: the pointer we care about, and the allocator.
assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields"); assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
let (unique_ptr, alloc) = let (unique_ptr, alloc) =
(v.project_field(self.ecx(), 0)?, v.project_field(self.ecx(), 1)?); (self.ecx().project_field(v, 0)?, self.ecx().project_field(v, 1)?);
// Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`... // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
// (which means another 2 fields, the second of which is a `PhantomData`) // (which means another 2 fields, the second of which is a `PhantomData`)
assert_eq!(unique_ptr.layout().fields.count(), 2); assert_eq!(unique_ptr.layout().fields.count(), 2);
let (nonnull_ptr, phantom) = ( let (nonnull_ptr, phantom) = (
unique_ptr.project_field(self.ecx(), 0)?, self.ecx().project_field(&unique_ptr, 0)?,
unique_ptr.project_field(self.ecx(), 1)?, self.ecx().project_field(&unique_ptr, 1)?,
); );
assert!( assert!(
phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()), phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
@ -476,7 +144,7 @@ macro_rules! make_value_visitor {
); );
// ... that contains a `NonNull`... (gladly, only a single field here) // ... that contains a `NonNull`... (gladly, only a single field here)
assert_eq!(nonnull_ptr.layout().fields.count(), 1); assert_eq!(nonnull_ptr.layout().fields.count(), 1);
let raw_ptr = nonnull_ptr.project_field(self.ecx(), 0)?; // the actual raw ptr let raw_ptr = self.ecx().project_field(&nonnull_ptr, 0)?; // the actual raw ptr
// ... whose only field finally is a raw ptr we can dereference. // ... whose only field finally is a raw ptr we can dereference.
self.visit_box(&raw_ptr)?; self.visit_box(&raw_ptr)?;
@ -487,7 +155,7 @@ macro_rules! make_value_visitor {
// We visited all parts of this one. // We visited all parts of this one.
return Ok(()); return Ok(());
} }
_ => {}, _ => {}
}; };
// Visit the fields of this value. // Visit the fields of this value.
@ -496,28 +164,17 @@ macro_rules! make_value_visitor {
&FieldsShape::Union(fields) => { &FieldsShape::Union(fields) => {
self.visit_union(v, fields)?; self.visit_union(v, fields)?;
} }
FieldsShape::Arbitrary { offsets, .. } => { FieldsShape::Arbitrary { offsets, memory_index } => {
// FIXME: We collect in a vec because otherwise there are lifetime for idx in 0..offsets.len() {
// errors: Projecting to a field needs access to `ecx`. let idx = Self::aggregate_field_order(memory_index, idx);
let fields: Vec<InterpResult<'tcx, Self::V>> = let field = self.ecx().project_field(v, idx)?;
(0..offsets.len()).map(|i| { self.visit_field(v, idx, &field)?;
v.project_field(self.ecx(), i) }
})
.collect();
self.visit_aggregate(v, fields.into_iter())?;
} }
FieldsShape::Array { .. } => { FieldsShape::Array { .. } => {
// Let's get an mplace (or immediate) first. for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() {
// FIXME: This might `force_allocate` if `v` is a `PlaceTy`! self.visit_field(v, idx, &field?)?;
let op = v.to_op_for_proj(self.ecx())?; }
// Now we can go over all the fields.
// This uses the *run-time length*, i.e., if we are a slice,
// the dynamic info from the metadata is used.
let iter = self.ecx().operand_array_fields(&op)?
.map(|f| f.and_then(|f| {
Ok($value_trait::from_op(&f))
}));
self.visit_aggregate(v, iter)?;
} }
} }
@ -525,20 +182,23 @@ macro_rules! make_value_visitor {
// If this is a multi-variant layout, find the right variant and proceed // If this is a multi-variant layout, find the right variant and proceed
// with *its* fields. // with *its* fields.
Variants::Multiple { .. } => { Variants::Multiple { .. } => {
let op = v.to_op_for_read(self.ecx())?; let idx = self.read_discriminant(v)?;
let idx = self.read_discriminant(&op)?; // There are 3 cases where downcasts can turn a Scalar/ScalarPair into a different ABI which
let inner = v.project_downcast(self.ecx(), idx)?; // could be a problem for `ImmTy` (see layout_sanity_check):
// - variant.size == Size::ZERO: works fine because `ImmTy::offset` has a special case for
// zero-sized layouts.
// - variant.fields.count() == 0: works fine because `ImmTy::offset` has a special case for
// zero-field aggregates.
// - variant.abi.is_uninhabited(): triggers UB in `read_discriminant` so we never get here.
let inner = self.ecx().project_downcast(v, idx)?;
trace!("walk_value: variant layout: {:#?}", inner.layout()); trace!("walk_value: variant layout: {:#?}", inner.layout());
// recurse with the inner type // recurse with the inner type
self.visit_variant(v, idx, &inner) self.visit_variant(v, idx, &inner)?;
} }
// For single-variant layouts, we already did anything there is to do. // For single-variant layouts, we already did anything there is to do.
Variants::Single { .. } => Ok(()) Variants::Single { .. } => {}
}
}
} }
Ok(())
} }
} }
make_value_visitor!(ValueVisitor, Value,);
make_value_visitor!(MutValueVisitor, ValueMut, mut);
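With projections unified, the twin `Value`/`ValueMut` traits and the `make_value_visitor!` macro collapse into the single trait above: the visitor names a value type, gets default `visit_*` hooks, and `walk_value` drives the recursion through the shared projection API. A minimal standalone model of that shape, using a toy value tree instead of interpreter values:

enum Value {
    Leaf(u64),
    Struct(Vec<(&'static str, Value)>),
}

trait ValueVisitor: Sized {
    /// Hook: called for every value; defaults to just walking.
    fn visit_value(&mut self, v: &Value) -> Result<(), String> {
        self.walk_value(v)
    }
    /// Hook: called for every leaf.
    fn visit_leaf(&mut self, _n: u64) -> Result<(), String> {
        Ok(())
    }
    /// Default recursion, not meant to be overridden.
    fn walk_value(&mut self, v: &Value) -> Result<(), String> {
        match v {
            Value::Leaf(n) => self.visit_leaf(*n),
            Value::Struct(fields) => {
                for (_name, field) in fields {
                    self.visit_value(field)?;
                }
                Ok(())
            }
        }
    }
}

/// Example visitor: sums every leaf it sees.
struct Summer(u64);
impl ValueVisitor for Summer {
    fn visit_leaf(&mut self, n: u64) -> Result<(), String> {
        self.0 += n;
        Ok(())
    }
}

fn main() {
    let v = Value::Struct(vec![
        ("a", Value::Leaf(1)),
        ("b", Value::Struct(vec![("c", Value::Leaf(2))])),
    ]);
    let mut s = Summer(0);
    s.visit_value(&v).unwrap();
    assert_eq!(s.0, 3);
}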


@ -12,7 +12,8 @@ use rustc_errors::{
use rustc_macros::HashStable; use rustc_macros::HashStable;
use rustc_session::CtfeBacktrace; use rustc_session::CtfeBacktrace;
use rustc_span::def_id::DefId; use rustc_span::def_id::DefId;
use rustc_target::abi::{call, Align, Size, WrappingRange}; use rustc_target::abi::{call, Align, Size, VariantIdx, WrappingRange};
use std::borrow::Cow; use std::borrow::Cow;
use std::{any::Any, backtrace::Backtrace, fmt}; use std::{any::Any, backtrace::Backtrace, fmt};
@ -323,7 +324,9 @@ pub enum UndefinedBehaviorInfo<'a> {
/// Data size is not equal to target size. /// Data size is not equal to target size.
ScalarSizeMismatch(ScalarSizeMismatch), ScalarSizeMismatch(ScalarSizeMismatch),
/// A discriminant of an uninhabited enum variant is written. /// A discriminant of an uninhabited enum variant is written.
UninhabitedEnumVariantWritten, UninhabitedEnumVariantWritten(VariantIdx),
/// An uninhabited enum variant is projected.
UninhabitedEnumVariantRead(VariantIdx),
/// Validation error. /// Validation error.
Validation(ValidationErrorInfo<'a>), Validation(ValidationErrorInfo<'a>),
// FIXME(fee1-dead) these should all be actual variants of the enum instead of dynamically // FIXME(fee1-dead) these should all be actual variants of the enum instead of dynamically
@ -393,6 +396,7 @@ pub enum ValidationErrorKind<'tcx> {
UnsafeCell, UnsafeCell,
UninhabitedVal { ty: Ty<'tcx> }, UninhabitedVal { ty: Ty<'tcx> },
InvalidEnumTag { value: String }, InvalidEnumTag { value: String },
UninhabitedEnumTag,
UninitEnumTag, UninitEnumTag,
UninitStr, UninitStr,
Uninit { expected: ExpectedKind }, Uninit { expected: ExpectedKind },


@ -741,9 +741,9 @@ where
let fields = match this.ty.kind() { let fields = match this.ty.kind() {
ty::Adt(def, _) if def.variants().is_empty() => ty::Adt(def, _) if def.variants().is_empty() =>
bug!("for_variant called on zero-variant enum"), bug!("for_variant called on zero-variant enum {}", this.ty),
ty::Adt(def, _) => def.variant(variant_index).fields.len(), ty::Adt(def, _) => def.variant(variant_index).fields.len(),
_ => bug!(), _ => bug!("`ty_and_layout_for_variant` on unexpected type {}", this.ty),
}; };
tcx.mk_layout(LayoutS { tcx.mk_layout(LayoutS {
variants: Variants::Single { index: variant_index }, variants: Variants::Single { index: variant_index },


@ -2670,11 +2670,6 @@ impl<'tcx> Ty<'tcx> {
variant_index: VariantIdx, variant_index: VariantIdx,
) -> Option<Discr<'tcx>> { ) -> Option<Discr<'tcx>> {
match self.kind() { match self.kind() {
TyKind::Adt(adt, _) if adt.variants().is_empty() => {
// This can actually happen during CTFE, see
// https://github.com/rust-lang/rust/issues/89765.
None
}
TyKind::Adt(adt, _) if adt.is_enum() => { TyKind::Adt(adt, _) if adt.is_enum() => {
Some(adt.discriminant_for_variant(tcx, variant_index)) Some(adt.discriminant_for_variant(tcx, variant_index))
} }

View file

@ -930,13 +930,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Ok(()) Ok(())
} }
} }
impl<'ecx, 'mir, 'tcx> MutValueVisitor<'mir, 'tcx, MiriMachine<'mir, 'tcx>> impl<'ecx, 'mir, 'tcx> ValueVisitor<'mir, 'tcx, MiriMachine<'mir, 'tcx>>
for RetagVisitor<'ecx, 'mir, 'tcx> for RetagVisitor<'ecx, 'mir, 'tcx>
{ {
type V = PlaceTy<'tcx, Provenance>; type V = PlaceTy<'tcx, Provenance>;
#[inline(always)] #[inline(always)]
fn ecx(&mut self) -> &mut MiriInterpCx<'mir, 'tcx> { fn ecx(&self) -> &MiriInterpCx<'mir, 'tcx> {
self.ecx self.ecx
} }

View file

@ -413,13 +413,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Ok(()) Ok(())
} }
} }
impl<'ecx, 'mir, 'tcx> MutValueVisitor<'mir, 'tcx, MiriMachine<'mir, 'tcx>> impl<'ecx, 'mir, 'tcx> ValueVisitor<'mir, 'tcx, MiriMachine<'mir, 'tcx>>
for RetagVisitor<'ecx, 'mir, 'tcx> for RetagVisitor<'ecx, 'mir, 'tcx>
{ {
type V = PlaceTy<'tcx, Provenance>; type V = PlaceTy<'tcx, Provenance>;
#[inline(always)] #[inline(always)]
fn ecx(&mut self) -> &mut MiriInterpCx<'mir, 'tcx> { fn ecx(&self) -> &MiriInterpCx<'mir, 'tcx> {
self.ecx self.ecx
} }
@ -585,7 +585,7 @@ fn inner_ptr_of_unique<'tcx>(
// `rustc_const_eval`, just with one fewer layer. // `rustc_const_eval`, just with one fewer layer.
// Here we have a `Unique(NonNull(*mut), PhantomData)` // Here we have a `Unique(NonNull(*mut), PhantomData)`
assert_eq!(place.layout.fields.count(), 2, "Unique must have exactly 2 fields"); assert_eq!(place.layout.fields.count(), 2, "Unique must have exactly 2 fields");
let (nonnull, phantom) = (ecx.place_field(place, 0)?, ecx.place_field(place, 1)?); let (nonnull, phantom) = (ecx.project_field(place, 0)?, ecx.project_field(place, 1)?);
assert!( assert!(
phantom.layout.ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()), phantom.layout.ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
"2nd field of `Unique` should be `PhantomData` but is `{:?}`", "2nd field of `Unique` should be `PhantomData` but is `{:?}`",
@ -593,7 +593,7 @@ fn inner_ptr_of_unique<'tcx>(
); );
// Now down to `NonNull(*mut)` // Now down to `NonNull(*mut)`
assert_eq!(nonnull.layout.fields.count(), 1, "NonNull must have exactly 1 field"); assert_eq!(nonnull.layout.fields.count(), 1, "NonNull must have exactly 1 field");
let ptr = ecx.place_field(&nonnull, 0)?; let ptr = ecx.project_field(&nonnull, 0)?;
// Finally a plain `*mut` // Finally a plain `*mut`
Ok(ptr) Ok(ptr)
} }
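The helper above peels `Unique<T>` with two single-field projections: `Unique` down to its `NonNull` field, then `NonNull` down to the raw pointer, while the `PhantomData` second field is only asserted, never read. A toy model of the same peel on ordinary structs that merely mirror the field shape (these are not the real std types):

use std::marker::PhantomData;

struct MyNonNull<T> {
    pointer: *mut T,
}
struct MyUnique<T> {
    pointer: MyNonNull<T>,
    _marker: PhantomData<T>, // second field, expected to be a PhantomData
}

fn inner_ptr_of_unique<T>(unique: &MyUnique<T>) -> *mut T {
    // Step 1: `Unique` -> its first field, the `NonNull`.
    let nonnull = &unique.pointer;
    // Step 2: `NonNull` -> its only field, the raw pointer.
    nonnull.pointer
}

fn main() {
    let mut x = 42u32;
    let unique = MyUnique { pointer: MyNonNull { pointer: &mut x as *mut u32 }, _marker: PhantomData };
    let raw = inner_ptr_of_unique(&unique);
    unsafe { *raw += 1 };
    assert_eq!(x, 43);
}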


@ -320,7 +320,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
))?; ))?;
let argvs_place = ecx.allocate(argvs_layout, MiriMemoryKind::Machine.into())?; let argvs_place = ecx.allocate(argvs_layout, MiriMemoryKind::Machine.into())?;
for (idx, arg) in argvs.into_iter().enumerate() { for (idx, arg) in argvs.into_iter().enumerate() {
let place = ecx.mplace_field(&argvs_place, idx)?; let place = ecx.project_field(&argvs_place, idx)?;
ecx.write_immediate(arg, &place.into())?; ecx.write_immediate(arg, &place.into())?;
} }
ecx.mark_immutable(&argvs_place); ecx.mark_immutable(&argvs_place);
@ -354,7 +354,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
ecx.machine.cmd_line = Some(*cmd_place); ecx.machine.cmd_line = Some(*cmd_place);
// Store the UTF-16 string. We just allocated so we know the bounds are fine. // Store the UTF-16 string. We just allocated so we know the bounds are fine.
for (idx, &c) in cmd_utf16.iter().enumerate() { for (idx, &c) in cmd_utf16.iter().enumerate() {
let place = ecx.mplace_field(&cmd_place, idx)?; let place = ecx.project_field(&cmd_place, idx)?;
ecx.write_scalar(Scalar::from_u16(c), &place.into())?; ecx.write_scalar(Scalar::from_u16(c), &place.into())?;
} }
ecx.mark_immutable(&cmd_place); ecx.mark_immutable(&cmd_place);

View file

@ -10,6 +10,7 @@ use log::trace;
use rustc_hir::def::{DefKind, Namespace}; use rustc_hir::def::{DefKind, Namespace};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX}; use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX};
use rustc_index::IndexVec;
use rustc_middle::mir; use rustc_middle::mir;
use rustc_middle::ty::{ use rustc_middle::ty::{
self, self,
@ -17,7 +18,7 @@ use rustc_middle::ty::{
List, TyCtxt, List, TyCtxt,
}; };
use rustc_span::{def_id::CrateNum, sym, Span, Symbol}; use rustc_span::{def_id::CrateNum, sym, Span, Symbol};
use rustc_target::abi::{Align, FieldsShape, Size, Variants}; use rustc_target::abi::{Align, FieldIdx, FieldsShape, Size, Variants};
use rustc_target::spec::abi::Abi; use rustc_target::spec::abi::Abi;
use rand::RngCore; use rand::RngCore;
@ -229,20 +230,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.layout_of(ty).unwrap() this.layout_of(ty).unwrap()
} }
/// Project to the given *named* field of the mplace (which must be a struct or union type). /// Project to the given *named* field (which must be a struct or union type).
fn mplace_field_named( fn project_field_named<P: Projectable<'mir, 'tcx, Provenance>>(
&self, &self,
mplace: &MPlaceTy<'tcx, Provenance>, base: &P,
name: &str, name: &str,
) -> InterpResult<'tcx, MPlaceTy<'tcx, Provenance>> { ) -> InterpResult<'tcx, P> {
let this = self.eval_context_ref(); let this = self.eval_context_ref();
let adt = mplace.layout.ty.ty_adt_def().unwrap(); let adt = base.layout().ty.ty_adt_def().unwrap();
for (idx, field) in adt.non_enum_variant().fields.iter().enumerate() { for (idx, field) in adt.non_enum_variant().fields.iter().enumerate() {
if field.name.as_str() == name { if field.name.as_str() == name {
return this.mplace_field(mplace, idx); return this.project_field(base, idx);
} }
} }
bug!("No field named {} in type {}", name, mplace.layout.ty); bug!("No field named {} in type {}", name, base.layout().ty);
} }
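`project_field_named` is just a declaration-order lookup followed by the ordinary index-based projection. A small sketch of the lookup half (hypothetical field table; the real helper calls `bug!` instead of returning `None`):

struct FieldDef {
    name: &'static str,
}

// Find the declaration index of a named field; the caller then projects by that index.
fn field_index_named(fields: &[FieldDef], name: &str) -> Option<usize> {
    fields.iter().position(|field| field.name == name)
}

fn main() {
    let timespec = [FieldDef { name: "tv_sec" }, FieldDef { name: "tv_nsec" }];
    assert_eq!(field_index_named(&timespec, "tv_nsec"), Some(1));
    assert_eq!(field_index_named(&timespec, "bogus"), None);
}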
/// Write an int of the appropriate size to `dest`. The target type may be signed or unsigned, /// Write an int of the appropriate size to `dest`. The target type may be signed or unsigned,
@ -270,7 +271,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let this = self.eval_context_mut(); let this = self.eval_context_mut();
for (idx, &val) in values.iter().enumerate() { for (idx, &val) in values.iter().enumerate() {
let field = this.mplace_field(dest, idx)?; let field = this.project_field(dest, idx)?;
this.write_int(val, &field.into())?; this.write_int(val, &field.into())?;
} }
Ok(()) Ok(())
@ -284,7 +285,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let this = self.eval_context_mut(); let this = self.eval_context_mut();
for &(name, val) in values.iter() { for &(name, val) in values.iter() {
let field = this.mplace_field_named(dest, name)?; let field = this.project_field_named(dest, name)?;
this.write_int(val, &field.into())?; this.write_int(val, &field.into())?;
} }
Ok(()) Ok(())
@ -479,6 +480,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
self.ecx self.ecx
} }
fn aggregate_field_order(memory_index: &IndexVec<FieldIdx, u32>, idx: usize) -> usize {
// We need to do an *inverse* lookup: find the field that has position `idx` in memory order.
for (src_field, &mem_pos) in memory_index.iter_enumerated() {
if mem_pos as usize == idx {
return src_field.as_usize();
}
}
panic!("invalid `memory_index`, could not find {}-th field in memory order", idx);
}
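`memory_index[decl_idx]` maps a field's declaration index to its position in memory order, so visiting fields in memory order needs the inverse mapping; the override above finds it with a linear scan per field. A standalone sketch that instead computes the whole inverse permutation up front (a plain `Vec<u32>` standing in for the `IndexVec<FieldIdx, u32>`), trading one temporary allocation for avoiding the per-field rescan:

fn invert_memory_index(memory_index: &[u32]) -> Vec<usize> {
    let mut decl_of_mem_pos = vec![usize::MAX; memory_index.len()];
    for (decl_idx, &mem_pos) in memory_index.iter().enumerate() {
        decl_of_mem_pos[mem_pos as usize] = decl_idx;
    }
    decl_of_mem_pos
}

fn main() {
    // The field declared first lives at memory position 2, the second at 0, the third at 1 ...
    let memory_index = [2u32, 0, 1];
    // ... so visiting in memory order means declaration indices 1, 2, 0.
    assert_eq!(invert_memory_index(&memory_index), vec![1, 2, 0]);
}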
// Hook to detect `UnsafeCell`. // Hook to detect `UnsafeCell`.
fn visit_value(&mut self, v: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> { fn visit_value(&mut self, v: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
trace!("UnsafeCellVisitor: {:?} {:?}", *v, v.layout.ty); trace!("UnsafeCellVisitor: {:?} {:?}", *v, v.layout.ty);
@ -524,33 +535,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
} }
} }
// Make sure we visit aggregates in increasing offset order.
fn visit_aggregate(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
fields: impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Provenance>>>,
) -> InterpResult<'tcx> {
match place.layout.fields {
FieldsShape::Array { .. } => {
// For the array layout, we know the iterator will yield sorted elements so
// we can avoid the allocation.
self.walk_aggregate(place, fields)
}
FieldsShape::Arbitrary { .. } => {
// Gather the subplaces and sort them before visiting.
let mut places = fields
.collect::<InterpResult<'tcx, Vec<MPlaceTy<'tcx, Provenance>>>>()?;
// we just compare offsets, the abs. value never matters
places.sort_by_key(|place| place.ptr.addr());
self.walk_aggregate(place, places.into_iter().map(Ok))
}
FieldsShape::Union { .. } | FieldsShape::Primitive => {
// Uh, what?
bug!("unions/primitives are not aggregates we should ever visit")
}
}
}
fn visit_union( fn visit_union(
&mut self, &mut self,
_v: &MPlaceTy<'tcx, Provenance>, _v: &MPlaceTy<'tcx, Provenance>,
@ -746,7 +730,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Ok(mplace) Ok(mplace)
} }
fn deref_pointer_as( /// Deref' a pointer *without* checking that the place is dereferenceable.
fn deref_pointer_unchecked(
&self, &self,
val: &ImmTy<'tcx, Provenance>, val: &ImmTy<'tcx, Provenance>,
layout: TyAndLayout<'tcx>, layout: TyAndLayout<'tcx>,
@ -811,10 +796,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
tp: &MPlaceTy<'tcx, Provenance>, tp: &MPlaceTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Option<Duration>> { ) -> InterpResult<'tcx, Option<Duration>> {
let this = self.eval_context_mut(); let this = self.eval_context_mut();
let seconds_place = this.mplace_field(tp, 0)?; let seconds_place = this.project_field(tp, 0)?;
let seconds_scalar = this.read_scalar(&seconds_place.into())?; let seconds_scalar = this.read_scalar(&seconds_place.into())?;
let seconds = seconds_scalar.to_target_isize(this)?; let seconds = seconds_scalar.to_target_isize(this)?;
let nanoseconds_place = this.mplace_field(tp, 1)?; let nanoseconds_place = this.project_field(tp, 1)?;
let nanoseconds_scalar = this.read_scalar(&nanoseconds_place.into())?; let nanoseconds_scalar = this.read_scalar(&nanoseconds_place.into())?;
let nanoseconds = nanoseconds_scalar.to_target_isize(this)?; let nanoseconds = nanoseconds_scalar.to_target_isize(this)?;

View file

@ -83,7 +83,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// Write pointers into array // Write pointers into array
for (i, ptr) in ptrs.into_iter().enumerate() { for (i, ptr) in ptrs.into_iter().enumerate() {
let place = this.mplace_index(&alloc, i as u64)?; let place = this.project_index(&alloc, i as u64)?;
this.write_pointer(ptr, &place.into())?; this.write_pointer(ptr, &place.into())?;
} }
@ -196,33 +196,33 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.write_immediate( this.write_immediate(
name_alloc.to_ref(this), name_alloc.to_ref(this),
&this.mplace_field(&dest, 0)?.into(), &this.project_field(&dest, 0)?.into(),
)?; )?;
this.write_immediate( this.write_immediate(
filename_alloc.to_ref(this), filename_alloc.to_ref(this),
&this.mplace_field(&dest, 1)?.into(), &this.project_field(&dest, 1)?.into(),
)?; )?;
} }
1 => { 1 => {
this.write_scalar( this.write_scalar(
Scalar::from_target_usize(name.len().try_into().unwrap(), this), Scalar::from_target_usize(name.len().try_into().unwrap(), this),
&this.mplace_field(&dest, 0)?.into(), &this.project_field(&dest, 0)?.into(),
)?; )?;
this.write_scalar( this.write_scalar(
Scalar::from_target_usize(filename.len().try_into().unwrap(), this), Scalar::from_target_usize(filename.len().try_into().unwrap(), this),
&this.mplace_field(&dest, 1)?.into(), &this.project_field(&dest, 1)?.into(),
)?; )?;
} }
_ => throw_unsup_format!("unknown `miri_resolve_frame` flags {}", flags), _ => throw_unsup_format!("unknown `miri_resolve_frame` flags {}", flags),
} }
this.write_scalar(Scalar::from_u32(lineno), &this.mplace_field(&dest, 2)?.into())?; this.write_scalar(Scalar::from_u32(lineno), &this.project_field(&dest, 2)?.into())?;
this.write_scalar(Scalar::from_u32(colno), &this.mplace_field(&dest, 3)?.into())?; this.write_scalar(Scalar::from_u32(colno), &this.project_field(&dest, 3)?.into())?;
// Support a 4-field struct for now - this is deprecated // Support a 4-field struct for now - this is deprecated
// and slated for removal. // and slated for removal.
if num_fields == 5 { if num_fields == 5 {
this.write_pointer(fn_ptr, &this.mplace_field(&dest, 4)?.into())?; this.write_pointer(fn_ptr, &this.project_field(&dest, 4)?.into())?;
} }
Ok(()) Ok(())

View file

@ -456,7 +456,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
))?; ))?;
let vars_place = this.allocate(vars_layout, MiriMemoryKind::Runtime.into())?; let vars_place = this.allocate(vars_layout, MiriMemoryKind::Runtime.into())?;
for (idx, var) in vars.into_iter().enumerate() { for (idx, var) in vars.into_iter().enumerate() {
let place = this.mplace_field(&vars_place, idx)?; let place = this.project_field(&vars_place, idx)?;
this.write_pointer(var, &place.into())?; this.write_pointer(var, &place.into())?;
} }
this.write_pointer(vars_place.ptr, &this.machine.env_vars.environ.unwrap().into())?; this.write_pointer(vars_place.ptr, &this.machine.env_vars.environ.unwrap().into())?;

View file

@ -942,9 +942,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[allow(clippy::arithmetic_side_effects)] // it's a u128, we can shift by 64 #[allow(clippy::arithmetic_side_effects)] // it's a u128, we can shift by 64
let (c_out, sum) = ((wide_sum >> 64).truncate::<u8>(), wide_sum.truncate::<u64>()); let (c_out, sum) = ((wide_sum >> 64).truncate::<u8>(), wide_sum.truncate::<u64>());
let c_out_field = this.place_field(dest, 0)?; let c_out_field = this.project_field(dest, 0)?;
this.write_scalar(Scalar::from_u8(c_out), &c_out_field)?; this.write_scalar(Scalar::from_u8(c_out), &c_out_field)?;
let sum_field = this.place_field(dest, 1)?; let sum_field = this.project_field(dest, 1)?;
this.write_scalar(Scalar::from_u64(sum), &sum_field)?; this.write_scalar(Scalar::from_u64(sum), &sum_field)?;
} }
"llvm.x86.sse2.pause" "llvm.x86.sse2.pause"

View file

@ -57,8 +57,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}; };
for i in 0..dest_len { for i in 0..dest_len {
let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?; let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
let dest = this.mplace_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = match which { let val = match which {
Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar(), Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar(),
Op::Abs => { Op::Abs => {
@ -172,9 +172,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}; };
for i in 0..dest_len { for i in 0..dest_len {
let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?; let left = this.read_immediate(&this.project_index(&left, i)?.into())?;
let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?; let right = this.read_immediate(&this.project_index(&right, i)?.into())?;
let dest = this.mplace_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = match which { let val = match which {
Op::MirOp(mir_op) => { Op::MirOp(mir_op) => {
let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?; let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?;
@ -232,10 +232,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert_eq!(dest_len, c_len); assert_eq!(dest_len, c_len);
for i in 0..dest_len { for i in 0..dest_len {
let a = this.read_scalar(&this.mplace_index(&a, i)?.into())?; let a = this.read_scalar(&this.project_index(&a, i)?.into())?;
let b = this.read_scalar(&this.mplace_index(&b, i)?.into())?; let b = this.read_scalar(&this.project_index(&b, i)?.into())?;
let c = this.read_scalar(&this.mplace_index(&c, i)?.into())?; let c = this.read_scalar(&this.project_index(&c, i)?.into())?;
let dest = this.mplace_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
// Works for f32 and f64. // Works for f32 and f64.
// FIXME: using host floats to work around https://github.com/rust-lang/miri/issues/2468. // FIXME: using host floats to work around https://github.com/rust-lang/miri/issues/2468.
@ -295,13 +295,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}; };
// Initialize with first lane, then proceed with the rest. // Initialize with first lane, then proceed with the rest.
let mut res = this.read_immediate(&this.mplace_index(&op, 0)?.into())?; let mut res = this.read_immediate(&this.project_index(&op, 0)?.into())?;
if matches!(which, Op::MirOpBool(_)) { if matches!(which, Op::MirOpBool(_)) {
// Convert to `bool` scalar. // Convert to `bool` scalar.
res = imm_from_bool(simd_element_to_bool(res)?); res = imm_from_bool(simd_element_to_bool(res)?);
} }
for i in 1..op_len { for i in 1..op_len {
let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?; let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
res = match which { res = match which {
Op::MirOp(mir_op) => { Op::MirOp(mir_op) => {
this.binary_op(mir_op, &res, &op)? this.binary_op(mir_op, &res, &op)?
@ -355,7 +355,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let mut res = init; let mut res = init;
for i in 0..op_len { for i in 0..op_len {
let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?; let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
res = this.binary_op(mir_op, &res, &op)?; res = this.binary_op(mir_op, &res, &op)?;
} }
this.write_immediate(*res, dest)?; this.write_immediate(*res, dest)?;
@ -372,10 +372,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert_eq!(dest_len, no_len); assert_eq!(dest_len, no_len);
for i in 0..dest_len { for i in 0..dest_len {
let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?; let mask = this.read_immediate(&this.project_index(&mask, i)?.into())?;
let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?; let yes = this.read_immediate(&this.project_index(&yes, i)?.into())?;
let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?; let no = this.read_immediate(&this.project_index(&no, i)?.into())?;
let dest = this.mplace_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = if simd_element_to_bool(mask)? { yes } else { no }; let val = if simd_element_to_bool(mask)? { yes } else { no };
this.write_immediate(*val, &dest.into())?; this.write_immediate(*val, &dest.into())?;
@ -403,9 +403,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
& 1u64 & 1u64
.checked_shl(simd_bitmask_index(i, dest_len, this.data_layout().endian)) .checked_shl(simd_bitmask_index(i, dest_len, this.data_layout().endian))
.unwrap(); .unwrap();
let yes = this.read_immediate(&this.mplace_index(&yes, i.into())?.into())?; let yes = this.read_immediate(&this.project_index(&yes, i.into())?.into())?;
let no = this.read_immediate(&this.mplace_index(&no, i.into())?.into())?; let no = this.read_immediate(&this.project_index(&no, i.into())?.into())?;
let dest = this.mplace_index(&dest, i.into())?; let dest = this.project_index(&dest, i.into())?;
let val = if mask != 0 { yes } else { no }; let val = if mask != 0 { yes } else { no };
this.write_immediate(*val, &dest.into())?; this.write_immediate(*val, &dest.into())?;
@ -435,8 +435,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let from_exposed_cast = intrinsic_name == "from_exposed_addr"; let from_exposed_cast = intrinsic_name == "from_exposed_addr";
for i in 0..dest_len { for i in 0..dest_len {
let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?; let op = this.read_immediate(&this.project_index(&op, i)?.into())?;
let dest = this.mplace_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) { let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
// Int-to-(int|float): always safe // Int-to-(int|float): always safe
@ -496,17 +496,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
for i in 0..dest_len { for i in 0..dest_len {
let src_index: u64 = this let src_index: u64 = this
.read_immediate(&this.operand_index(index, i)?)? .read_immediate(&this.project_index(index, i)?)?
.to_scalar() .to_scalar()
.to_u32()? .to_u32()?
.into(); .into();
let dest = this.mplace_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = if src_index < left_len { let val = if src_index < left_len {
this.read_immediate(&this.mplace_index(&left, src_index)?.into())? this.read_immediate(&this.project_index(&left, src_index)?.into())?
} else if src_index < left_len.checked_add(right_len).unwrap() { } else if src_index < left_len.checked_add(right_len).unwrap() {
let right_idx = src_index.checked_sub(left_len).unwrap(); let right_idx = src_index.checked_sub(left_len).unwrap();
this.read_immediate(&this.mplace_index(&right, right_idx)?.into())? this.read_immediate(&this.project_index(&right, right_idx)?.into())?
} else { } else {
span_bug!( span_bug!(
this.cur_span(), this.cur_span(),
@ -528,10 +528,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert_eq!(dest_len, mask_len); assert_eq!(dest_len, mask_len);
for i in 0..dest_len { for i in 0..dest_len {
let passthru = this.read_immediate(&this.mplace_index(&passthru, i)?.into())?; let passthru = this.read_immediate(&this.project_index(&passthru, i)?.into())?;
let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?; let ptr = this.read_immediate(&this.project_index(&ptrs, i)?.into())?;
let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?; let mask = this.read_immediate(&this.project_index(&mask, i)?.into())?;
let dest = this.mplace_index(&dest, i)?; let dest = this.project_index(&dest, i)?;
let val = if simd_element_to_bool(mask)? { let val = if simd_element_to_bool(mask)? {
let place = this.deref_operand(&ptr.into())?; let place = this.deref_operand(&ptr.into())?;
@ -552,9 +552,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert_eq!(ptrs_len, mask_len); assert_eq!(ptrs_len, mask_len);
for i in 0..ptrs_len { for i in 0..ptrs_len {
let value = this.read_immediate(&this.mplace_index(&value, i)?.into())?; let value = this.read_immediate(&this.project_index(&value, i)?.into())?;
let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?; let ptr = this.read_immediate(&this.project_index(&ptrs, i)?.into())?;
let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?; let mask = this.read_immediate(&this.project_index(&mask, i)?.into())?;
if simd_element_to_bool(mask)? { if simd_element_to_bool(mask)? {
let place = this.deref_operand(&ptr.into())?; let place = this.deref_operand(&ptr.into())?;
@ -578,7 +578,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let mut res = 0u64; let mut res = 0u64;
for i in 0..op_len { for i in 0..op_len {
let op = this.read_immediate(&this.mplace_index(&op, i.into())?.into())?; let op = this.read_immediate(&this.project_index(&op, i.into())?.into())?;
if simd_element_to_bool(op)? { if simd_element_to_bool(op)? {
res |= 1u64 res |= 1u64
.checked_shl(simd_bitmask_index(i, op_len, this.data_layout().endian)) .checked_shl(simd_bitmask_index(i, op_len, this.data_layout().endian))
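All of the hunks in this file follow the same lane-by-lane shape: project element `i` out of each operand place, compute, and write element `i` of the destination. As a plain-Rust sketch of the horizontal-reduction variant (the "initialize with first lane, then proceed with the rest" loop above), with ordinary slices standing in for interpreter places and wrapping addition standing in for the chosen MIR binary operation:

```rust
// Sketch only: slices stand in for SIMD places; wrapping addition matches the
// wrapping semantics of the portable-SIMD integer ops.
fn simd_reduce_add(lanes: &[u64]) -> u64 {
    let mut res = lanes[0];
    for &lane in &lanes[1..] {
        res = res.wrapping_add(lane);
    }
    res
}

fn main() {
    assert_eq!(simd_reduce_add(&[1, 2, 3, 4]), 10);
    assert_eq!(simd_reduce_add(&[u64::MAX, 1]), 0);
}
```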

View file

@ -593,7 +593,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[allow(deprecated)] #[allow(deprecated)]
let home_dir = std::env::home_dir().unwrap(); let home_dir = std::env::home_dir().unwrap();
let (written, _) = this.write_path_to_c_str(&home_dir, buf, buflen)?; let (written, _) = this.write_path_to_c_str(&home_dir, buf, buflen)?;
let pw_dir = this.mplace_field_named(&pwd, "pw_dir")?; let pw_dir = this.project_field_named(&pwd, "pw_dir")?;
this.write_pointer(buf, &pw_dir.into())?; this.write_pointer(buf, &pw_dir.into())?;
if written { if written {

View file

@ -1141,7 +1141,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
("tv_sec", access_sec.into()), ("tv_sec", access_sec.into()),
("tv_nsec", access_nsec.into()), ("tv_nsec", access_nsec.into()),
], ],
&this.mplace_field_named(&statxbuf, "stx_atime")?, &this.project_field_named(&statxbuf, "stx_atime")?,
)?; )?;
#[rustfmt::skip] #[rustfmt::skip]
this.write_int_fields_named( this.write_int_fields_named(
@ -1149,7 +1149,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
("tv_sec", created_sec.into()), ("tv_sec", created_sec.into()),
("tv_nsec", created_nsec.into()), ("tv_nsec", created_nsec.into()),
], ],
&this.mplace_field_named(&statxbuf, "stx_btime")?, &this.project_field_named(&statxbuf, "stx_btime")?,
)?; )?;
#[rustfmt::skip] #[rustfmt::skip]
this.write_int_fields_named( this.write_int_fields_named(
@ -1157,7 +1157,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
("tv_sec", 0.into()), ("tv_sec", 0.into()),
("tv_nsec", 0.into()), ("tv_nsec", 0.into()),
], ],
&this.mplace_field_named(&statxbuf, "stx_ctime")?, &this.project_field_named(&statxbuf, "stx_ctime")?,
)?; )?;
#[rustfmt::skip] #[rustfmt::skip]
this.write_int_fields_named( this.write_int_fields_named(
@ -1165,7 +1165,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
("tv_sec", modified_sec.into()), ("tv_sec", modified_sec.into()),
("tv_nsec", modified_nsec.into()), ("tv_nsec", modified_nsec.into()),
], ],
&this.mplace_field_named(&statxbuf, "stx_mtime")?, &this.project_field_named(&statxbuf, "stx_mtime")?,
)?; )?;
Ok(0) Ok(0)
@ -1421,7 +1421,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// } // }
let entry_place = this.deref_operand_as(entry_op, this.libc_ty_layout("dirent"))?; let entry_place = this.deref_operand_as(entry_op, this.libc_ty_layout("dirent"))?;
let name_place = this.mplace_field(&entry_place, 5)?; let name_place = this.project_field(&entry_place, 5)?;
let file_name = dir_entry.file_name(); // not a Path as there are no separators! let file_name = dir_entry.file_name(); // not a Path as there are no separators!
let (name_fits, file_name_buf_len) = this.write_os_str_to_c_str( let (name_fits, file_name_buf_len) = this.write_os_str_to_c_str(

View file

@ -73,9 +73,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
if op == epoll_ctl_add || op == epoll_ctl_mod { if op == epoll_ctl_add || op == epoll_ctl_mod {
let event = this.deref_operand_as(event, this.libc_ty_layout("epoll_event"))?; let event = this.deref_operand_as(event, this.libc_ty_layout("epoll_event"))?;
let events = this.mplace_field(&event, 0)?; let events = this.project_field(&event, 0)?;
let events = this.read_scalar(&events.into())?.to_u32()?; let events = this.read_scalar(&events.into())?.to_u32()?;
let data = this.mplace_field(&event, 1)?; let data = this.project_field(&event, 1)?;
let data = this.read_scalar(&data.into())?; let data = this.read_scalar(&data.into())?;
let event = EpollEvent { events, data }; let event = EpollEvent { events, data };

View file

@ -85,7 +85,8 @@ pub fn futex<'tcx>(
return Ok(()); return Ok(());
} }
let timeout = this.deref_pointer_as( // `read_timespec` will check the place when it is not null.
let timeout = this.deref_pointer_unchecked(
&this.read_immediate(&args[3])?, &this.read_immediate(&args[3])?,
this.libc_ty_layout("timespec"), this.libc_ty_layout("timespec"),
)?; )?;

View file

@ -122,7 +122,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// We have to put the result into io_status_block. // We have to put the result into io_status_block.
if let Some(n) = written { if let Some(n) = written {
let io_status_information = let io_status_information =
this.mplace_field_named(&io_status_block, "Information")?; this.project_field_named(&io_status_block, "Information")?;
this.write_scalar( this.write_scalar(
Scalar::from_target_usize(n.into(), this), Scalar::from_target_usize(n.into(), this),
&io_status_information.into(), &io_status_information.into(),

View file

@ -3,7 +3,7 @@
//! Tests for various intrinsics that do not fit anywhere else. //! Tests for various intrinsics that do not fit anywhere else.
use std::intrinsics; use std::intrinsics;
use std::mem::{size_of, size_of_val, size_of_val_raw}; use std::mem::{size_of, size_of_val, size_of_val_raw, discriminant};
struct Bomb; struct Bomb;
@ -39,4 +39,7 @@ fn main() {
let _v = intrinsics::discriminant_value(&0); let _v = intrinsics::discriminant_value(&0);
let _v = intrinsics::discriminant_value(&true); let _v = intrinsics::discriminant_value(&true);
let _v = intrinsics::discriminant_value(&vec![1, 2, 3]); let _v = intrinsics::discriminant_value(&vec![1, 2, 3]);
// Make sure that even if the discriminant is stored together with data, the intrinsic returns
// only the discriminant, nothing about the data.
assert_eq!(discriminant(&Some(false)), discriminant(&Some(true)));
} }
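The new assertion relies on `Option<bool>`'s niche layout: the tag shares storage with the payload byte, yet `discriminant` still describes only the variant. A stand-alone sketch of the same point (the concrete byte values mentioned in the comments reflect current rustc layout and are not guaranteed):

```rust
use std::mem::discriminant;

fn main() {
    // `Option<bool>` fits in one byte on current rustc: `Some(false)` = 0,
    // `Some(true)` = 1, `None` = 2 (the bool's spare value), so tag and data
    // share storage. Even so, `discriminant` compares only the variant.
    assert_eq!(discriminant(&Some(false)), discriminant(&Some(true)));
    assert_ne!(discriminant(&Some(false)), discriminant(&None::<bool>));
}
```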

View file

@ -23,17 +23,17 @@ alloc1 (static: FOO, size: 8, align: 4) {
alloc19 (size: 48, align: 4) { alloc19 (size: 48, align: 4) {
0x00 00 00 00 00 __ __ __ __ alloc6 00 00 00 00 ........ 0x00 00 00 00 00 __ __ __ __ alloc6 00 00 00 00 ........
0x10 00 00 00 00 __ __ __ __ alloc9 02 00 00 00 ........ 0x10 00 00 00 00 __ __ __ __ alloc10 02 00 00 00 ........
0x20 01 00 00 00 2a 00 00 00 alloc14 03 00 00 00 ....*....... 0x20 01 00 00 00 2a 00 00 00 alloc15 03 00 00 00 ....*.......
} }
alloc6 (size: 0, align: 4) {} alloc6 (size: 0, align: 4) {}
alloc9 (size: 16, align: 4) { alloc10 (size: 16, align: 4) {
alloc10 03 00 00 00 alloc11 03 00 00 00 ........ alloc9 03 00 00 00 alloc11 03 00 00 00 ........
} }
alloc10 (size: 3, align: 1) { alloc9 (size: 3, align: 1) {
66 6f 6f foo 66 6f 6f foo
} }
@ -41,12 +41,12 @@ alloc11 (size: 3, align: 1) {
62 61 72 bar 62 61 72 bar
} }
alloc14 (size: 24, align: 4) { alloc15 (size: 24, align: 4) {
0x00 alloc15 03 00 00 00 alloc16 03 00 00 00 ........ 0x00 alloc14 03 00 00 00 alloc16 03 00 00 00 ........
0x10 alloc17 04 00 00 00 .... 0x10 alloc17 04 00 00 00 ....
} }
alloc15 (size: 3, align: 1) { alloc14 (size: 3, align: 1) {
6d 65 68 meh 6d 65 68 meh
} }

View file

@ -24,19 +24,19 @@ alloc1 (static: FOO, size: 16, align: 8) {
alloc19 (size: 72, align: 8) { alloc19 (size: 72, align: 8) {
0x00 00 00 00 00 __ __ __ __ alloc6 .... 0x00 00 00 00 00 __ __ __ __ alloc6 ....
0x10 00 00 00 00 00 00 00 00 00 00 00 00 __ __ __ __ ............ 0x10 00 00 00 00 00 00 00 00 00 00 00 00 __ __ __ __ ............
0x20 alloc9 02 00 00 00 00 00 00 00 ........ 0x20 alloc10 02 00 00 00 00 00 00 00 ........
0x30 01 00 00 00 2a 00 00 00 alloc14 ....*... 0x30 01 00 00 00 2a 00 00 00 alloc15 ....*...
0x40 03 00 00 00 00 00 00 00 ........ 0x40 03 00 00 00 00 00 00 00 ........
} }
alloc6 (size: 0, align: 8) {} alloc6 (size: 0, align: 8) {}
alloc9 (size: 32, align: 8) { alloc10 (size: 32, align: 8) {
0x00 alloc10 03 00 00 00 00 00 00 00 ........ 0x00 alloc9 03 00 00 00 00 00 00 00 ........
0x10 alloc11 03 00 00 00 00 00 00 00 ........ 0x10 alloc11 03 00 00 00 00 00 00 00 ........
} }
alloc10 (size: 3, align: 1) { alloc9 (size: 3, align: 1) {
66 6f 6f foo 66 6f 6f foo
} }
@ -44,13 +44,13 @@ alloc11 (size: 3, align: 1) {
62 61 72 bar 62 61 72 bar
} }
alloc14 (size: 48, align: 8) { alloc15 (size: 48, align: 8) {
0x00 alloc15 03 00 00 00 00 00 00 00 ........ 0x00 alloc14 03 00 00 00 00 00 00 00 ........
0x10 alloc16 03 00 00 00 00 00 00 00 ........ 0x10 alloc16 03 00 00 00 00 00 00 00 ........
0x20 alloc17 04 00 00 00 00 00 00 00 ........ 0x20 alloc17 04 00 00 00 00 00 00 00 ........
} }
alloc15 (size: 3, align: 1) { alloc14 (size: 3, align: 1) {
6d 65 68 meh 6d 65 68 meh
} }

View file

@ -24,7 +24,7 @@ error[E0080]: it is undefined behavior to use this value
--> $DIR/raw-bytes.rs:42:1 --> $DIR/raw-bytes.rs:42:1
| |
LL | const BAD_UNINHABITED_VARIANT1: UninhDiscriminant = unsafe { mem::transmute(1u8) }; LL | const BAD_UNINHABITED_VARIANT1: UninhDiscriminant = unsafe { mem::transmute(1u8) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-variant(B)>.0: encountered a value of the never type `!` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
| |
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: 1, align: 1) { = note: the raw bytes of the constant (size: 1, align: 1) {
@ -35,7 +35,7 @@ error[E0080]: it is undefined behavior to use this value
--> $DIR/raw-bytes.rs:44:1 --> $DIR/raw-bytes.rs:44:1
| |
LL | const BAD_UNINHABITED_VARIANT2: UninhDiscriminant = unsafe { mem::transmute(3u8) }; LL | const BAD_UNINHABITED_VARIANT2: UninhDiscriminant = unsafe { mem::transmute(3u8) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-variant(D)>.0: encountered a value of uninhabited type `Never` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
| |
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: 1, align: 1) { = note: the raw bytes of the constant (size: 1, align: 1) {

View file

@ -75,7 +75,7 @@ error[E0080]: it is undefined behavior to use this value
--> $DIR/ub-enum.rs:81:1 --> $DIR/ub-enum.rs:81:1
| |
LL | const BAD_UNINHABITED_VARIANT1: UninhDiscriminant = unsafe { mem::transmute(1u8) }; LL | const BAD_UNINHABITED_VARIANT1: UninhDiscriminant = unsafe { mem::transmute(1u8) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-variant(B)>.0: encountered a value of the never type `!` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
| |
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) {
@ -86,7 +86,7 @@ error[E0080]: it is undefined behavior to use this value
--> $DIR/ub-enum.rs:83:1 --> $DIR/ub-enum.rs:83:1
| |
LL | const BAD_UNINHABITED_VARIANT2: UninhDiscriminant = unsafe { mem::transmute(3u8) }; LL | const BAD_UNINHABITED_VARIANT2: UninhDiscriminant = unsafe { mem::transmute(3u8) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-variant(D)>.0: encountered a value of uninhabited type `Never` | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
| |
= note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior.
= note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) {
@ -108,14 +108,27 @@ error[E0080]: evaluation of constant value failed
--> $DIR/ub-enum.rs:96:77 --> $DIR/ub-enum.rs:96:77
| |
LL | const BAD_UNINHABITED_WITH_DATA1: Result<(i32, Never), (i32, !)> = unsafe { mem::transmute(0u64) }; LL | const BAD_UNINHABITED_WITH_DATA1: Result<(i32, Never), (i32, !)> = unsafe { mem::transmute(0u64) };
| ^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-variant(Ok)>.0.1: encountered a value of uninhabited type `Never` | ^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/ub-enum.rs:98:77 --> $DIR/ub-enum.rs:98:77
| |
LL | const BAD_UNINHABITED_WITH_DATA2: Result<(i32, !), (i32, Never)> = unsafe { mem::transmute(0u64) }; LL | const BAD_UNINHABITED_WITH_DATA2: Result<(i32, !), (i32, Never)> = unsafe { mem::transmute(0u64) };
| ^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-variant(Ok)>.0.1: encountered a value of the never type `!` | ^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
error: aborting due to 13 previous errors error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/mem/mod.rs:LL:COL
|
= note: read discriminant of an uninhabited enum variant
|
note: inside `discriminant::<Never>`
--> $SRC_DIR/core/src/mem/mod.rs:LL:COL
note: inside `TEST_ICE_89765`
--> $DIR/ub-enum.rs:103:14
|
LL | unsafe { std::mem::discriminant(&*(&() as *const () as *const Never)); };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error: aborting due to 14 previous errors
For more information about this error, try `rustc --explain E0080`. For more information about this error, try `rustc --explain E0080`.

View file

@ -2,7 +2,7 @@
// Strip out raw byte dumps to make comparison platform-independent: // Strip out raw byte dumps to make comparison platform-independent:
// normalize-stderr-test "(the raw bytes of the constant) \(size: [0-9]*, align: [0-9]*\)" -> "$1 (size: $$SIZE, align: $$ALIGN)" // normalize-stderr-test "(the raw bytes of the constant) \(size: [0-9]*, align: [0-9]*\)" -> "$1 (size: $$SIZE, align: $$ALIGN)"
// normalize-stderr-test "([0-9a-f][0-9a-f] |╾─*a(lloc)?[0-9]+(\+[a-z0-9]+)?─*╼ )+ *│.*" -> "HEX_DUMP" // normalize-stderr-test "([0-9a-f][0-9a-f] |╾─*a(lloc)?[0-9]+(\+[a-z0-9]+)?─*╼ )+ *│.*" -> "HEX_DUMP"
#![feature(never_type)] #![feature(never_type, const_discriminant)]
#![allow(invalid_value)] #![allow(invalid_value)]
use std::mem; use std::mem;
@ -66,8 +66,8 @@ const BAD_ENUM2_OPTION_PTR: Option<Enum2> = unsafe { mem::transmute(&0) };
// # valid discriminant for uninhabited variant // # valid discriminant for uninhabited variant
// An enum with 3 variants of which some are uninhabited -- so the uninhabited variants *do* // An enum with uninhabited variants but also at least 2 inhabited variants -- so the uninhabited
// have a discriminant. // variants *do* have a discriminant.
enum UninhDiscriminant { enum UninhDiscriminant {
A, A,
B(!), B(!),
@ -98,5 +98,11 @@ const BAD_UNINHABITED_WITH_DATA1: Result<(i32, Never), (i32, !)> = unsafe { mem:
const BAD_UNINHABITED_WITH_DATA2: Result<(i32, !), (i32, Never)> = unsafe { mem::transmute(0u64) }; const BAD_UNINHABITED_WITH_DATA2: Result<(i32, !), (i32, Never)> = unsafe { mem::transmute(0u64) };
//~^ ERROR evaluation of constant value failed //~^ ERROR evaluation of constant value failed
const TEST_ICE_89765: () = {
// This is a regression test for https://github.com/rust-lang/rust/issues/89765.
unsafe { std::mem::discriminant(&*(&() as *const () as *const Never)); };
//~^ inside `TEST_ICE_89765`
};
fn main() { fn main() {
} }
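For context on the test above: because `UninhDiscriminant` has at least two inhabited variants, the enum needs a real tag, so the uninhabited variants get tag values of their own even though no value of those variants can ever exist. A nightly-only sketch (the enum mirrors the test; the concrete tag values are an implementation detail of the default repr):

```rust
#![feature(never_type)]
#![allow(dead_code)]

#[derive(Clone, Copy)]
enum Never {}

// A and C are inhabited, B and D are not, but all four still get a
// discriminant because a tag is needed to tell A and C apart.
enum UninhDiscriminant {
    A,
    B(!),
    C,
    D(Never),
}

fn main() {
    // Only the inhabited variants can ever be constructed...
    let (a, c) = (UninhDiscriminant::A, UninhDiscriminant::C);
    assert_ne!(std::mem::discriminant(&a), std::mem::discriminant(&c));
    // ...and materializing a value whose tag names B or D (e.g. by
    // transmuting 1u8 or 3u8, as the test does) is undefined behavior.
}
```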

View file

@ -24,13 +24,6 @@ enum SingleVariant {
const TEST_V: Discriminant<SingleVariant> = discriminant(&SingleVariant::V); const TEST_V: Discriminant<SingleVariant> = discriminant(&SingleVariant::V);
pub const TEST_VOID: () = {
// This is UB, but CTFE does not check validity so it does not detect this.
// This is a regression test for https://github.com/rust-lang/rust/issues/89765.
unsafe { std::mem::discriminant(&*(&() as *const () as *const Void)); };
};
fn main() { fn main() {
assert_eq!(TEST_A, TEST_A_OTHER); assert_eq!(TEST_A, TEST_A_OTHER);
assert_eq!(TEST_A, discriminant(black_box(&Test::A(17)))); assert_eq!(TEST_A, discriminant(black_box(&Test::A(17))));

View file

@ -1,8 +1,26 @@
// revisions: no_flag with_flag // revisions: no_flag with_flag
// [no_flag] check-pass // [no_flag] check-pass
// [with_flag] compile-flags: -Zextra-const-ub-checks // [with_flag] compile-flags: -Zextra-const-ub-checks
#![feature(never_type)]
use std::mem::transmute; use std::mem::transmute;
use std::ptr::addr_of;
#[derive(Clone, Copy)]
enum E { A, B }
#[derive(Clone, Copy)]
enum Never {}
// An enum with uninhabited variants but also at least 2 inhabited variants -- so the uninhabited
// variants *do* have a discriminant.
#[derive(Clone, Copy)]
enum UninhDiscriminant {
A,
B(!),
C,
D(Never),
}
const INVALID_BOOL: () = unsafe { const INVALID_BOOL: () = unsafe {
let _x: bool = transmute(3u8); let _x: bool = transmute(3u8);
@ -27,4 +45,15 @@ const UNALIGNED_PTR: () = unsafe {
//[with_flag]~| invalid value //[with_flag]~| invalid value
}; };
const UNINHABITED_VARIANT: () = unsafe {
let data = [1u8];
// Not using transmute, since we want to hit the ImmTy code path.
let v = *addr_of!(data).cast::<UninhDiscriminant>();
//[with_flag]~^ ERROR: evaluation of constant value failed
};
// Regression tests for an ICE (related to <https://github.com/rust-lang/rust/issues/113988>).
const VALID_ENUM1: E = { let e = E::A; e };
const VALID_ENUM2: Result<&'static [u8], ()> = { let e = Err(()); e };
fn main() {} fn main() {}
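The new `UNINHABITED_VARIANT` test reads the enum through a raw-pointer copy rather than `transmute` so that the interpreter handles the value as an immediate (the `ImmTy` path mentioned in the comment). A safe-by-construction variant of the same pattern, using a hypothetical `#[repr(u8)]` enum so the byte-to-tag mapping is actually guaranteed (the test itself deliberately uses the default repr and an invalid tag):

```rust
use std::ptr::addr_of;

// Hypothetical example enum; with `#[repr(u8)]` the discriminants are
// guaranteed to be 0 and 1, so reading the byte 0 as `E` is well-defined.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
#[allow(dead_code)]
enum E {
    A,
    B,
}

fn main() {
    let data = [0u8];
    // Same shape as the test: take a raw pointer, cast it, copy the value out.
    let v = unsafe { *addr_of!(data).cast::<E>() };
    assert_eq!(v, E::A);
}
```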

View file

@ -1,11 +1,11 @@
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/detect-extra-ub.rs:8:20 --> $DIR/detect-extra-ub.rs:26:20
| |
LL | let _x: bool = transmute(3u8); LL | let _x: bool = transmute(3u8);
| ^^^^^^^^^^^^^^ constructing invalid value: encountered 0x03, but expected a boolean | ^^^^^^^^^^^^^^ constructing invalid value: encountered 0x03, but expected a boolean
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/detect-extra-ub.rs:14:21 --> $DIR/detect-extra-ub.rs:32:21
| |
LL | let _x: usize = transmute(&3u8); LL | let _x: usize = transmute(&3u8);
| ^^^^^^^^^^^^^^^ unable to turn pointer into raw bytes | ^^^^^^^^^^^^^^^ unable to turn pointer into raw bytes
@ -14,7 +14,7 @@ LL | let _x: usize = transmute(&3u8);
= help: the absolute address of a pointer is not known at compile-time, so such operations are not supported = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/detect-extra-ub.rs:20:30 --> $DIR/detect-extra-ub.rs:38:30
| |
LL | let _x: (usize, usize) = transmute(x); LL | let _x: (usize, usize) = transmute(x);
| ^^^^^^^^^^^^ unable to turn pointer into raw bytes | ^^^^^^^^^^^^ unable to turn pointer into raw bytes
@ -23,11 +23,17 @@ LL | let _x: (usize, usize) = transmute(x);
= help: the absolute address of a pointer is not known at compile-time, so such operations are not supported = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
error[E0080]: evaluation of constant value failed error[E0080]: evaluation of constant value failed
--> $DIR/detect-extra-ub.rs:25:20 --> $DIR/detect-extra-ub.rs:43:20
| |
LL | let _x: &u32 = transmute(&[0u8; 4]); LL | let _x: &u32 = transmute(&[0u8; 4]);
| ^^^^^^^^^^^^^^^^^^^^ constructing invalid value: encountered an unaligned reference (required 4 byte alignment but found 1) | ^^^^^^^^^^^^^^^^^^^^ constructing invalid value: encountered an unaligned reference (required 4 byte alignment but found 1)
error: aborting due to 4 previous errors error[E0080]: evaluation of constant value failed
--> $DIR/detect-extra-ub.rs:51:13
|
LL | let v = *addr_of!(data).cast::<UninhDiscriminant>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ constructing invalid value at .<enum-tag>: encountered an uninhabited enum variant
error: aborting due to 5 previous errors
For more information about this error, try `rustc --explain E0080`. For more information about this error, try `rustc --explain E0080`.