//! An interpreter for MIR used in CTFE and by miri.

#[macro_export]
macro_rules! err_unsup {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::Unsupported(
            $crate::mir::interpret::UnsupportedOpInfo::$($tt)*
        )
    };
}

#[macro_export]
macro_rules! err_unsup_format {
    ($($tt:tt)*) => { err_unsup!(Unsupported(format!($($tt)*))) };
}

#[macro_export]
macro_rules! err_inval {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::InvalidProgram(
            $crate::mir::interpret::InvalidProgramInfo::$($tt)*
        )
    };
}

#[macro_export]
macro_rules! err_ub {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::UndefinedBehavior(
            $crate::mir::interpret::UndefinedBehaviorInfo::$($tt)*
        )
    };
}

#[macro_export]
macro_rules! err_ub_format {
    ($($tt:tt)*) => { err_ub!(Ub(format!($($tt)*))) };
}

#[macro_export]
macro_rules! err_exhaust {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::ResourceExhaustion(
            $crate::mir::interpret::ResourceExhaustionInfo::$($tt)*
        )
    };
}

#[macro_export]
macro_rules! err_machine_stop {
    ($($tt:tt)*) => {
        $crate::mir::interpret::InterpError::MachineStop(Box::new($($tt)*))
    };
}

// In the `throw_*` macros, avoid `return` to make them work with `try {}`.
#[macro_export]
macro_rules! throw_unsup {
    ($($tt:tt)*) => { Err::<!, _>(err_unsup!($($tt)*))? };
}

#[macro_export]
macro_rules! throw_unsup_format {
    ($($tt:tt)*) => { throw_unsup!(Unsupported(format!($($tt)*))) };
}

#[macro_export]
macro_rules! throw_inval {
    ($($tt:tt)*) => { Err::<!, _>(err_inval!($($tt)*))? };
}

#[macro_export]
macro_rules! throw_ub {
    ($($tt:tt)*) => { Err::<!, _>(err_ub!($($tt)*))? };
}

#[macro_export]
macro_rules! throw_ub_format {
    ($($tt:tt)*) => { throw_ub!(Ub(format!($($tt)*))) };
}

#[macro_export]
macro_rules! throw_exhaust {
    ($($tt:tt)*) => { Err::<!, _>(err_exhaust!($($tt)*))? };
}

#[macro_export]
macro_rules! throw_machine_stop {
    ($($tt:tt)*) => { Err::<!, _>(err_machine_stop!($($tt)*))? };
}
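
// A sketch of typical usage (the helper and its message are hypothetical; `InterpResult`
// is the error-carrying result type re-exported from `self::error` below):
//
//     fn check_in_bounds(offset: u64, size: u64) -> InterpResult<'static, ()> {
//         if offset >= size {
//             throw_ub_format!("offset {} is out of bounds for an allocation of size {}", offset, size);
//         }
//         Ok(())
//     }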

mod allocation;
mod error;
mod pointer;
mod queries;
mod value;

use std::convert::TryFrom;
use std::fmt;
use std::io;
use std::io::{Read, Write};
use std::num::NonZeroU32;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_ast::LitKind;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::{HashMapExt, Lock};
use rustc_data_structures::tiny_list::TinyList;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_serialize::{Decodable, Encodable};
use rustc_target::abi::{Endian, Size};

use crate::mir;
use crate::ty::codec::{TyDecoder, TyEncoder};
use crate::ty::subst::GenericArgKind;
use crate::ty::{self, Instance, Ty, TyCtxt};

pub use self::error::{
    struct_error, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult, EvalToConstValueResult,
    InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
    ResourceExhaustionInfo, UndefinedBehaviorInfo, UninitBytesAccess, UnsupportedOpInfo,
};

pub use self::value::{get_slice_bytes, ConstValue, RawConst, Scalar, ScalarMaybeUninit};

pub use self::allocation::{Allocation, AllocationExtra, InitMask, Relocations};

pub use self::pointer::{Pointer, PointerArithmetic};

/// Uniquely identifies one of the following:
/// - A constant
/// - A static
/// - A const fn where all arguments (if any) are zero-sized types
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable, Lift)]
pub struct GlobalId<'tcx> {
    /// For a constant or static, the `Instance` of the item itself.
    /// For a promoted global, the `Instance` of the function it belongs to.
    pub instance: ty::Instance<'tcx>,

    /// The index for promoted globals within their function's `mir::Body`.
    pub promoted: Option<mir::Promoted>,
}

impl GlobalId<'tcx> {
    pub fn display(self, tcx: TyCtxt<'tcx>) -> String {
        let instance_name = with_no_trimmed_paths(|| tcx.def_path_str(self.instance.def.def_id()));
        if let Some(promoted) = self.promoted {
            format!("{}::{:?}", instance_name, promoted)
        } else {
            instance_name
        }
    }
}
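
// For illustration (hypothetical paths): for a plain constant, `display` yields its path,
// e.g. "my_crate::FOO", while for a promoted it appends the promoted index, producing
// something like "my_crate::foo::promoted[0]".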

/// Input argument for `tcx.lit_to_const`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, HashStable)]
pub struct LitToConstInput<'tcx> {
    /// The absolute value of the resultant constant.
    pub lit: &'tcx LitKind,
    /// The type of the constant.
    pub ty: Ty<'tcx>,
    /// Whether the constant is negative.
    pub neg: bool,
}
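
// For illustration (a sketch): the expression `-3i32` reaches `lit_to_const` as the absolute
// literal `3` plus a negation flag, roughly
//
//     LitToConstInput { lit: &LitKind::Int(3, LitIntType::Signed(IntTy::I32)), ty: i32_ty, neg: true }
//
// where `i32_ty` stands for the `Ty` of `i32`.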

/// Error type for `tcx.lit_to_const`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
pub enum LitToConstError {
    /// The literal's inferred type did not match the expected `ty` in the input.
    /// This is used for graceful error handling (`delay_span_bug`) in
    /// type checking (`Const::from_anon_const`).
    TypeError,
    UnparseableFloat,
    Reported,
}

#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct AllocId(pub u64);

// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl fmt::Debug for AllocId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if f.alternate() { write!(f, "a{}", self.0) } else { write!(f, "alloc{}", self.0) }
    }
}

impl fmt::Display for AllocId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
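
// For example: `format!("{:?}", AllocId(42))` and `format!("{}", AllocId(42))` both yield
// "alloc42", while the alternate form `format!("{:#?}", AllocId(42))` yields the shorter "a42".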

#[derive(TyDecodable, TyEncodable)]
enum AllocDiscriminant {
    Alloc,
    Fn,
    Static,
}

pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<'tcx>>(
    encoder: &mut E,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
) -> Result<(), E::Error> {
    match tcx.global_alloc(alloc_id) {
        GlobalAlloc::Memory(alloc) => {
            trace!("encoding {:?} with {:#?}", alloc_id, alloc);
            AllocDiscriminant::Alloc.encode(encoder)?;
            alloc.encode(encoder)?;
        }
        GlobalAlloc::Function(fn_instance) => {
            trace!("encoding {:?} with {:#?}", alloc_id, fn_instance);
            AllocDiscriminant::Fn.encode(encoder)?;
            fn_instance.encode(encoder)?;
        }
        GlobalAlloc::Static(did) => {
            assert!(!tcx.is_thread_local_static(did));
            // References to statics don't need to know about their allocation,
            // just about the static's `DefId`.
            AllocDiscriminant::Static.encode(encoder)?;
            did.encode(encoder)?;
        }
    }
    Ok(())
}

// Used to avoid infinite recursion when decoding cyclic allocations.
type DecodingSessionId = NonZeroU32;

#[derive(Clone)]
enum State {
    Empty,
    InProgressNonAlloc(TinyList<DecodingSessionId>),
    InProgress(TinyList<DecodingSessionId>, AllocId),
    Done(AllocId),
}

pub struct AllocDecodingState {
    // For each `AllocId`, we keep track of which decoding state it's currently in.
    decoding_state: Vec<Lock<State>>,
    // The offsets of each allocation in the data stream.
    data_offsets: Vec<u32>,
}

impl AllocDecodingState {
    pub fn new_decoding_session(&self) -> AllocDecodingSession<'_> {
        static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0);
        let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst);

        // Make sure this is never zero.
        let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap();

        AllocDecodingSession { state: self, session_id }
    }

    pub fn new(data_offsets: Vec<u32>) -> Self {
        let decoding_state = vec![Lock::new(State::Empty); data_offsets.len()];

        Self { decoding_state, data_offsets }
    }
}

#[derive(Copy, Clone)]
pub struct AllocDecodingSession<'s> {
    state: &'s AllocDecodingState,
    session_id: DecodingSessionId,
}

impl<'s> AllocDecodingSession<'s> {
    /// Decodes an `AllocId` in a thread-safe way.
    pub fn decode_alloc_id<D>(&self, decoder: &mut D) -> Result<AllocId, D::Error>
    where
        D: TyDecoder<'tcx>,
    {
        // Read the index of the allocation.
        let idx = usize::try_from(decoder.read_u32()?).unwrap();
        let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();

        // Decode the `AllocDiscriminant` now so that we know if we have to reserve an
        // `AllocId`.
        let (alloc_kind, pos) = decoder.with_position(pos, |decoder| {
            let alloc_kind = AllocDiscriminant::decode(decoder)?;
            Ok((alloc_kind, decoder.position()))
        })?;

        // Check the decoding state to see if it's already decoded or if we should
        // decode it here.
        let alloc_id = {
            let mut entry = self.state.decoding_state[idx].lock();

            match *entry {
                State::Done(alloc_id) => {
                    return Ok(alloc_id);
                }
                ref mut entry @ State::Empty => {
                    // We are allowed to decode.
                    match alloc_kind {
                        AllocDiscriminant::Alloc => {
                            // If this is an allocation, we need to reserve an
                            // `AllocId` so we can decode cyclic graphs.
                            let alloc_id = decoder.tcx().reserve_alloc_id();
                            *entry =
                                State::InProgress(TinyList::new_single(self.session_id), alloc_id);
                            Some(alloc_id)
                        }
                        AllocDiscriminant::Fn | AllocDiscriminant::Static => {
                            // Fns and statics cannot be cyclic, and their `AllocId`
                            // is determined later by interning.
                            *entry =
                                State::InProgressNonAlloc(TinyList::new_single(self.session_id));
                            None
                        }
                    }
                }
                State::InProgressNonAlloc(ref mut sessions) => {
                    if sessions.contains(&self.session_id) {
                        bug!("this should be unreachable");
                    } else {
                        // Start decoding concurrently.
                        sessions.insert(self.session_id);
                        None
                    }
                }
                State::InProgress(ref mut sessions, alloc_id) => {
                    if sessions.contains(&self.session_id) {
                        // Don't recurse.
                        return Ok(alloc_id);
                    } else {
                        // Start decoding concurrently.
                        sessions.insert(self.session_id);
                        Some(alloc_id)
                    }
                }
            }
        };

        // Now decode the actual data.
        let alloc_id = decoder.with_position(pos, |decoder| {
            match alloc_kind {
                AllocDiscriminant::Alloc => {
                    let alloc = <&'tcx Allocation as Decodable<_>>::decode(decoder)?;
                    // We already have a reserved `AllocId`.
                    let alloc_id = alloc_id.unwrap();
                    trace!("decoded alloc {:?}: {:#?}", alloc_id, alloc);
                    decoder.tcx().set_alloc_id_same_memory(alloc_id, alloc);
                    Ok(alloc_id)
                }
                AllocDiscriminant::Fn => {
                    assert!(alloc_id.is_none());
                    trace!("creating fn alloc ID");
                    let instance = ty::Instance::decode(decoder)?;
                    trace!("decoded fn alloc instance: {:?}", instance);
                    let alloc_id = decoder.tcx().create_fn_alloc(instance);
                    Ok(alloc_id)
                }
                AllocDiscriminant::Static => {
                    assert!(alloc_id.is_none());
                    trace!("creating extern static alloc ID");
                    let did = <DefId as Decodable<D>>::decode(decoder)?;
                    trace!("decoded static def-ID: {:?}", did);
                    let alloc_id = decoder.tcx().create_static_alloc(did);
                    Ok(alloc_id)
                }
            }
        })?;

        self.state.decoding_state[idx].with_lock(|entry| {
            *entry = State::Done(alloc_id);
        });

        Ok(alloc_id)
    }
}

/// An allocation in the global (tcx-managed) memory can be either a function pointer,
/// a static, or a "real" allocation with some data in it.
#[derive(Debug, Clone, Eq, PartialEq, Hash, TyDecodable, TyEncodable, HashStable)]
pub enum GlobalAlloc<'tcx> {
    /// The alloc ID is used as a function pointer.
    Function(Instance<'tcx>),
    /// The alloc ID points to a "lazy" static variable that did not get computed (yet).
    /// This is also used to break the cycle in recursive statics.
    Static(DefId),
    /// The alloc ID points to memory.
    Memory(&'tcx Allocation),
}

impl GlobalAlloc<'tcx> {
    /// Panics if the `GlobalAlloc` does not refer to a `GlobalAlloc::Memory`.
    #[track_caller]
    #[inline]
    pub fn unwrap_memory(&self) -> &'tcx Allocation {
        match *self {
            GlobalAlloc::Memory(mem) => mem,
            _ => bug!("expected memory, got {:?}", self),
        }
    }

    /// Panics if the `GlobalAlloc` is not `GlobalAlloc::Function`.
    #[track_caller]
    #[inline]
    pub fn unwrap_fn(&self) -> Instance<'tcx> {
        match *self {
            GlobalAlloc::Function(instance) => instance,
            _ => bug!("expected function, got {:?}", self),
        }
    }
}
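
// For example (a sketch): code that knows an `AllocId` refers to interned memory can do
//
//     let alloc = tcx.global_alloc(id).unwrap_memory();
//
// which panics (via `bug!`) if `id` actually refers to a function or a static.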

crate struct AllocMap<'tcx> {
    /// Maps `AllocId`s to their corresponding allocations.
    alloc_map: FxHashMap<AllocId, GlobalAlloc<'tcx>>,

    /// Used to ensure that statics and functions only get one associated `AllocId`.
    /// Should never contain a `GlobalAlloc::Memory`!
    //
    // FIXME: Should we just have two separate dedup maps, one for statics and one for functions?
    dedup: FxHashMap<GlobalAlloc<'tcx>, AllocId>,

    /// The `AllocId` to assign to the next requested ID.
    /// Always incremented; never gets smaller.
    next_id: AllocId,
}

impl<'tcx> AllocMap<'tcx> {
    crate fn new() -> Self {
        AllocMap { alloc_map: Default::default(), dedup: Default::default(), next_id: AllocId(0) }
    }
    fn reserve(&mut self) -> AllocId {
        let next = self.next_id;
        self.next_id.0 = self.next_id.0.checked_add(1).expect(
            "You overflowed a u64 by incrementing by 1... \
             You've just earned yourself a free drink if we ever meet. \
             Seriously, how did you do that?!",
        );
        next
    }
}

impl<'tcx> TyCtxt<'tcx> {
    /// Obtains a new allocation ID that can be referenced but does not
    /// yet have an allocation backing it.
    ///
    /// Make sure to call `set_alloc_id_memory` or `set_alloc_id_same_memory` before returning such
    /// an `AllocId` from a query.
    pub fn reserve_alloc_id(&self) -> AllocId {
        self.alloc_map.lock().reserve()
    }

    /// Reserves a new ID *if* this allocation has not been dedup-reserved before.
    /// Should only be used for function pointers and statics; we don't want
    /// to dedup IDs for "real" memory!
    fn reserve_and_set_dedup(&self, alloc: GlobalAlloc<'tcx>) -> AllocId {
        let mut alloc_map = self.alloc_map.lock();
        match alloc {
            GlobalAlloc::Function(..) | GlobalAlloc::Static(..) => {}
            GlobalAlloc::Memory(..) => bug!("Trying to dedup-reserve memory with real data!"),
        }
        if let Some(&alloc_id) = alloc_map.dedup.get(&alloc) {
            return alloc_id;
        }
        let id = alloc_map.reserve();
        debug!("creating alloc {:?} with id {}", alloc, id);
        alloc_map.alloc_map.insert(id, alloc.clone());
        alloc_map.dedup.insert(alloc, id);
        id
    }

    /// Generates an `AllocId` for a static or returns a cached one in case this function has been
    /// called on the same static before.
    pub fn create_static_alloc(&self, static_id: DefId) -> AllocId {
        self.reserve_and_set_dedup(GlobalAlloc::Static(static_id))
    }

    /// Generates an `AllocId` for a function. Depending on the function type,
    /// this might get deduplicated or assigned a new ID each time.
    pub fn create_fn_alloc(&self, instance: Instance<'tcx>) -> AllocId {
        // Functions cannot be identified by pointers, as asm-equal functions can get deduplicated
        // by the linker (we set the "unnamed_addr" attribute for LLVM) and functions can be
        // duplicated across crates.
        // We thus generate a new `AllocId` for every mention of a function. This means that
        // `main as fn() == main as fn()` is false, while `let x = main as fn(); x == x` is true.
        // However, formatting code relies on function identity (see #58320), so we only do
        // this for generic functions. Lifetime parameters are ignored.
        let is_generic = instance.substs.into_iter().any(|kind| match kind.unpack() {
            GenericArgKind::Lifetime(_) => false,
            _ => true,
        });
        if is_generic {
            // Get a fresh ID.
            let mut alloc_map = self.alloc_map.lock();
            let id = alloc_map.reserve();
            alloc_map.alloc_map.insert(id, GlobalAlloc::Function(instance));
            id
        } else {
            // Deduplicate.
            self.reserve_and_set_dedup(GlobalAlloc::Function(instance))
        }
    }

    /// Interns the `Allocation` and returns a new `AllocId`, even if there's already an identical
    /// `Allocation` with a different `AllocId`.
    /// Statics with identical content will still point to the same `Allocation`, i.e.,
    /// their data will be deduplicated through `Allocation` interning -- but they
    /// are different places in memory and as such need different IDs.
    pub fn create_memory_alloc(&self, mem: &'tcx Allocation) -> AllocId {
        let id = self.reserve_alloc_id();
        self.set_alloc_id_memory(id, mem);
        id
    }

    /// Returns `None` in case the `AllocId` is dangling. An `InterpretCx` can still have a
    /// local `Allocation` for that `AllocId`, but having such an `AllocId` in a constant is
    /// illegal and will likely ICE.
    /// This function exists to allow const eval to detect the difference between evaluation-
    /// local dangling pointers and allocations in constants/statics.
    #[inline]
    pub fn get_global_alloc(&self, id: AllocId) -> Option<GlobalAlloc<'tcx>> {
        self.alloc_map.lock().alloc_map.get(&id).cloned()
    }

    #[inline]
    #[track_caller]
    /// Panics in case the `AllocId` is dangling. Since that is impossible for `AllocId`s in
    /// constants (as all constants must pass interning and validation that check for dangling
    /// ids), this function is frequently used throughout rustc, but should not be used within
    /// the miri engine.
    pub fn global_alloc(&self, id: AllocId) -> GlobalAlloc<'tcx> {
        match self.get_global_alloc(id) {
            Some(alloc) => alloc,
            None => bug!("could not find allocation for {}", id),
        }
    }

    /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. Trying to
    /// call this function twice, even with the same `Allocation`, will ICE the compiler.
    pub fn set_alloc_id_memory(&self, id: AllocId, mem: &'tcx Allocation) {
        if let Some(old) = self.alloc_map.lock().alloc_map.insert(id, GlobalAlloc::Memory(mem)) {
            bug!("tried to set allocation ID {}, but it was already existing as {:#?}", id, old);
        }
    }

    /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. May be called
    /// twice for the same `(AllocId, Allocation)` pair.
    fn set_alloc_id_same_memory(&self, id: AllocId, mem: &'tcx Allocation) {
        self.alloc_map.lock().alloc_map.insert_same(id, GlobalAlloc::Memory(mem));
    }
}
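
// A sketch of the typical interning flow (illustrative; `alloc` is some `&'tcx Allocation`):
//
//     // Potentially self-referential data: reserve the ID first, then back it with memory.
//     let id = tcx.reserve_alloc_id();
//     // ... build `alloc`, possibly embedding `id` in its relocations ...
//     tcx.set_alloc_id_memory(id, alloc);
//
//     // When no such two-step dance is needed, `create_memory_alloc` does both at once:
//     let id = tcx.create_memory_alloc(alloc);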

////////////////////////////////////////////////////////////////////////////////
// Methods to access integers in the target endianness
////////////////////////////////////////////////////////////////////////////////

#[inline]
pub fn write_target_uint(
    endianness: Endian,
    mut target: &mut [u8],
    data: u128,
) -> Result<(), io::Error> {
    // This u128 holds an "any-size uint" (since smaller uints can fit in it).
    // So we do not write all bytes of the u128, just the "payload".
    match endianness {
        Endian::Little => target.write(&data.to_le_bytes())?,
        Endian::Big => target.write(&data.to_be_bytes()[16 - target.len()..])?,
    };
    debug_assert!(target.len() == 0); // We should have filled the target buffer.
    Ok(())
}

#[inline]
pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
    // This u128 holds an "any-size uint" (since smaller uints can fit in it).
    let mut buf = [0u8; std::mem::size_of::<u128>()];
    // So we do not read exactly 16 bytes into the u128, just the "payload".
    let uint = match endianness {
        Endian::Little => {
            source.read(&mut buf)?;
            Ok(u128::from_le_bytes(buf))
        }
        Endian::Big => {
            source.read(&mut buf[16 - source.len()..])?;
            Ok(u128::from_be_bytes(buf))
        }
    };
    debug_assert!(source.len() == 0); // We should have consumed the source buffer.
    uint
}
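
// For example (a sketch): a 2-byte little-endian write of `0x1234` fills the buffer with
// `[0x34, 0x12]`, and reading those bytes back recovers the value:
//
//     let mut buf = [0u8; 2];
//     write_target_uint(Endian::Little, &mut buf, 0x1234).unwrap();
//     assert_eq!(buf, [0x34, 0x12]);
//     assert_eq!(read_target_uint(Endian::Little, &buf).unwrap(), 0x1234);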

////////////////////////////////////////////////////////////////////////////////
// Methods to facilitate working with signed integers stored in a u128
////////////////////////////////////////////////////////////////////////////////

/// Truncates `value` to `size` bits and then sign-extends it to 128 bits
/// (i.e., if it is negative, fill with 1's on the left).
#[inline]
pub fn sign_extend(value: u128, size: Size) -> u128 {
    let size = size.bits();
    if size == 0 {
        // Truncated until nothing is left.
        return 0;
    }
    // Sign-extend it.
    let shift = 128 - size;
    // Shift the unsigned value to the left, then shift back to the right as signed
    // (essentially fills with FF on the left).
    (((value << shift) as i128) >> shift) as u128
}
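
// For example: with an 8-bit `size`, `0xFF` is reinterpreted as the signed value `-1`, so
// `sign_extend(0xFF, Size::from_bits(8))` returns `u128::MAX`, while
// `sign_extend(0x7F, Size::from_bits(8))` returns `0x7F` unchanged.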

/// Truncates `value` to `size` bits.
#[inline]
pub fn truncate(value: u128, size: Size) -> u128 {
    let size = size.bits();
    if size == 0 {
        // Truncated until nothing is left.
        return 0;
    }
    let shift = 128 - size;
    // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
    (value << shift) >> shift
}
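
// For example: `truncate(0x1FF, Size::from_bits(8))` drops everything above the low 8 bits
// and returns `0xFF`.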

/// Computes the unsigned absolute value without wrapping or panicking.
#[inline]
pub fn uabs(value: i64) -> u64 {
    // The only tricky part here is if value == i64::MIN. In that case,
    // wrapping_abs() returns i64::MIN == -2^63. Casting this value to a u64
    // gives 2^63, the correct value.
    value.wrapping_abs() as u64
}
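
// For example: `uabs(-5)` is `5`, and `uabs(i64::MIN)` is `2^63` (i.e. `1u64 << 63`), a value
// for which a plain `i64::abs` would overflow.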