
Auto merge of #130401 - matthiaskrgr:rollup-fri2j58, r=matthiaskrgr

Rollup of 5 pull requests

Successful merges:

 - #129439 (Implement feature `string_from_utf8_lossy_owned` for lossy conversion from `Vec<u8>` to `String` methods)
 - #129828 (miri: treat non-memory local variables properly for data race detection)
 - #130110 (make dist vendoring configurable)
 - #130293 (Fix lint levels not getting overridden by attrs on `Stmt` nodes)
 - #130342 (interpret, miri: fix dealing with overflow during slice indexing and allocation)

Failed merges:

 - #130394 (const: don't ICE when encountering a mutable ref to immutable memory)

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2024-09-15 15:05:29 +00:00
commit 8c2c9a9ef5
37 changed files with 807 additions and 185 deletions

View file

@@ -216,7 +216,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
             }
             sym::write_bytes => {
-                self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+                self.write_bytes_intrinsic(&args[0], &args[1], &args[2], "write_bytes")?;
             }
             sym::compare_bytes => {
                 let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
@@ -599,9 +599,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let count = self.read_target_usize(count)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap())?;
         let (size, align) = (layout.size, layout.align.abi);
-        // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
-        // but no actual allocation can be big enough for the difference to be noticeable.
-        let size = size.checked_mul(count, self).ok_or_else(|| {
+        let size = self.compute_size_in_bytes(size, count).ok_or_else(|| {
             err_ub_custom!(
                 fluent::const_eval_size_overflow,
                 name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
@@ -635,11 +634,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         Ok(())
     }

-    pub(crate) fn write_bytes_intrinsic(
+    pub fn write_bytes_intrinsic(
         &mut self,
         dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
         byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
         count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        name: &'static str,
     ) -> InterpResult<'tcx> {
         let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?;
@@ -649,9 +649,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
-        let len = layout.size.checked_mul(count, self).ok_or_else(|| {
-            err_ub_custom!(fluent::const_eval_size_overflow, name = "write_bytes")
-        })?;
+        let len = self
+            .compute_size_in_bytes(layout.size, count)
+            .ok_or_else(|| err_ub_custom!(fluent::const_eval_size_overflow, name = name))?;
         let bytes = std::iter::repeat(byte).take(len.bytes_usize());
         self.write_bytes_ptr(dst, bytes)

View file

@@ -540,10 +540,29 @@ pub trait Machine<'tcx>: Sized {
         Ok(ReturnAction::Normal)
     }

+    /// Called immediately after an "immediate" local variable is read
+    /// (i.e., this is called for reads that do not end up accessing addressable memory).
+    #[inline(always)]
+    fn after_local_read(_ecx: &InterpCx<'tcx, Self>, _local: mir::Local) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called immediately after an "immediate" local variable is assigned a new value
+    /// (i.e., this is called for writes that do not end up in memory).
+    /// `storage_live` indicates whether this is the initial write upon `StorageLive`.
+    #[inline(always)]
+    fn after_local_write(
+        _ecx: &mut InterpCx<'tcx, Self>,
+        _local: mir::Local,
+        _storage_live: bool,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
     /// Called immediately after actual memory was allocated for a local
     /// but before the local's stack frame is updated to point to that memory.
     #[inline(always)]
-    fn after_local_allocated(
+    fn after_local_moved_to_memory(
         _ecx: &mut InterpCx<'tcx, Self>,
         _local: mir::Local,
         _mplace: &MPlaceTy<'tcx, Self::Provenance>,

View file

@@ -222,7 +222,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         } else {
             Allocation::try_uninit(size, align)?
         };
-        self.allocate_raw_ptr(alloc, kind)
+        self.insert_allocation(alloc, kind)
     }

     pub fn allocate_bytes_ptr(
@@ -233,14 +233,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         mutability: Mutability,
     ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
         let alloc = Allocation::from_bytes(bytes, align, mutability);
-        self.allocate_raw_ptr(alloc, kind)
+        self.insert_allocation(alloc, kind)
     }

-    pub fn allocate_raw_ptr(
+    pub fn insert_allocation(
         &mut self,
         alloc: Allocation<M::Provenance, (), M::Bytes>,
         kind: MemoryKind<M::MemoryKind>,
     ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        assert!(alloc.size() <= self.max_size_of_val());
         let id = self.tcx.reserve_alloc_id();
         debug_assert_ne!(
             Some(kind),
@@ -1046,6 +1047,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         );
         res
     }
+
+    pub(super) fn validation_in_progress(&self) -> bool {
+        self.memory.validation_in_progress
+    }
 }

 #[doc(hidden)]

View file

@@ -697,6 +697,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         if matches!(op, Operand::Immediate(_)) {
             assert!(!layout.is_unsized());
         }
+        M::after_local_read(self, local)?;
         Ok(OpTy { op, layout })
     }

View file

@ -1,11 +1,12 @@
use either::Either; use either::Either;
use rustc_apfloat::{Float, FloatConvert}; use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir::interpret::{InterpResult, Scalar}; use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc_middle::mir::NullOp; use rustc_middle::mir::NullOp;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty}; use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty};
use rustc_middle::{bug, mir, span_bug}; use rustc_middle::{bug, mir, span_bug};
use rustc_span::symbol::sym; use rustc_span::symbol::sym;
use rustc_target::abi::Size;
use tracing::trace; use tracing::trace;
use super::{throw_ub, ImmTy, InterpCx, Machine, MemPlaceMeta}; use super::{throw_ub, ImmTy, InterpCx, Machine, MemPlaceMeta};
@ -287,6 +288,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}) })
} }
/// Computes the total size of this access, `count * elem_size`,
/// checking for overflow beyond isize::MAX.
pub fn compute_size_in_bytes(&self, elem_size: Size, count: u64) -> Option<Size> {
// `checked_mul` applies `u64` limits independent of the target pointer size... but the
// subsequent check for `max_size_of_val` means we also handle 32bit targets correctly.
// (We cannot use `Size::checked_mul` as that enforces `obj_size_bound` as the limit, which
// would be wrong here.)
elem_size
.bytes()
.checked_mul(count)
.map(Size::from_bytes)
.filter(|&total| total <= self.max_size_of_val())
}
fn binary_ptr_op( fn binary_ptr_op(
&self, &self,
bin_op: mir::BinOp, bin_op: mir::BinOp,
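
For reference, the overflow rule this new helper enforces can be sketched standalone (a hypothetical model, not the interpreter code itself, assuming a 64-bit target where no object may exceed isize::MAX bytes):

fn compute_size_in_bytes(elem_size: u64, count: u64) -> Option<u64> {
    // The allocation limit modeled here: no object may exceed isize::MAX bytes.
    const MAX_SIZE_OF_VAL: u64 = i64::MAX as u64;
    elem_size.checked_mul(count).filter(|&total| total <= MAX_SIZE_OF_VAL)
}

fn main() {
    assert_eq!(compute_size_in_bytes(8, 4), Some(32));
    // `u64::MAX * 2` overflows the multiplication, and `2^63` would exceed
    // isize::MAX: both yield None rather than a wrapped-around size.
    assert_eq!(compute_size_in_bytes(u64::MAX, 2), None);
    assert_eq!(compute_size_in_bytes(1 << 62, 2), None);
}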

View file

@@ -500,15 +500,13 @@ where
         &self,
         local: mir::Local,
     ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        // Other parts of the system rely on `Place::Local` never being unsized.
-        // So we eagerly check here if this local has an MPlace, and if yes we use it.
         let frame = self.frame();
         let layout = self.layout_of_local(frame, local, None)?;
         let place = if layout.is_sized() {
             // We can just always use the `Local` for sized values.
             Place::Local { local, offset: None, locals_addr: frame.locals_addr() }
         } else {
-            // Unsized `Local` isn't okay (we cannot store the metadata).
+            // Other parts of the system rely on `Place::Local` never being unsized.
             match frame.locals[local].access()? {
                 Operand::Immediate(_) => bug!(),
                 Operand::Indirect(mplace) => Place::Ptr(*mplace),
@@ -562,7 +560,10 @@ where
         place: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<
         'tcx,
-        Either<MPlaceTy<'tcx, M::Provenance>, (&mut Immediate<M::Provenance>, TyAndLayout<'tcx>)>,
+        Either<
+            MPlaceTy<'tcx, M::Provenance>,
+            (&mut Immediate<M::Provenance>, TyAndLayout<'tcx>, mir::Local),
+        >,
     > {
         Ok(match place.to_place().as_mplace_or_local() {
             Left(mplace) => Left(mplace),
@@ -581,7 +582,7 @@ where
                     }
                     Operand::Immediate(local_val) => {
                         // The local still has the optimized representation.
-                        Right((local_val, layout))
+                        Right((local_val, layout, local))
                     }
                 }
             }
@@ -643,9 +644,13 @@ where
         assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");
         match self.as_mplace_or_mutable_local(&dest.to_place())? {
-            Right((local_val, local_layout)) => {
+            Right((local_val, local_layout, local)) => {
                 // Local can be updated in-place.
                 *local_val = src;
+                // Call the machine hook (the data race detector needs to know about this write).
+                if !self.validation_in_progress() {
+                    M::after_local_write(self, local, /*storage_live*/ false)?;
+                }
                 // Double-check that the value we are storing and the local fit to each other.
                 if cfg!(debug_assertions) {
                     src.assert_matches_abi(local_layout.abi, self);
@@ -714,8 +719,12 @@ where
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         match self.as_mplace_or_mutable_local(&dest.to_place())? {
-            Right((local_val, _local_layout)) => {
+            Right((local_val, _local_layout, local)) => {
                 *local_val = Immediate::Uninit;
+                // Call the machine hook (the data race detector needs to know about this write).
+                if !self.validation_in_progress() {
+                    M::after_local_write(self, local, /*storage_live*/ false)?;
+                }
             }
             Left(mplace) => {
                 let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
@@ -734,8 +743,12 @@ where
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         match self.as_mplace_or_mutable_local(&dest.to_place())? {
-            Right((local_val, _local_layout)) => {
+            Right((local_val, _local_layout, local)) => {
                 local_val.clear_provenance()?;
+                // Call the machine hook (the data race detector needs to know about this write).
+                if !self.validation_in_progress() {
+                    M::after_local_write(self, local, /*storage_live*/ false)?;
+                }
             }
             Left(mplace) => {
                 let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
@@ -941,7 +954,7 @@ where
                 mplace.mplace,
             )?;
         }
-        M::after_local_allocated(self, local, &mplace)?;
+        M::after_local_moved_to_memory(self, local, &mplace)?;
         // Now we can call `access_mut` again, asserting it goes well, and actually
         // overwrite things. This points to the entire allocation, not just the part
         // the place refers to, i.e. we do this before we apply `offset`.

View file

@@ -17,7 +17,7 @@ use rustc_target::abi::{self, Size, VariantIdx};
 use tracing::{debug, instrument};

 use super::{
-    throw_ub, throw_unsup, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
+    err_ub, throw_ub, throw_unsup, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
     Provenance, Scalar,
 };
@@ -229,7 +229,11 @@ where
                 // This can only be reached in ConstProp and non-rustc-MIR.
                 throw_ub!(BoundsCheckFailed { len, index });
             }
-            let offset = stride * index; // `Size` multiplication
+            // With raw slices, `len` can be so big that this *can* overflow.
+            let offset = self
+                .compute_size_in_bytes(stride, index)
+                .ok_or_else(|| err_ub!(PointerArithOverflow))?;
             // All fields have the same layout.
             let field_layout = base.layout().field(self, 0);
             (offset, field_layout)

View file

@@ -534,8 +534,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
             Operand::Indirect(*dest_place.mplace())
         } else {
-            assert!(!meta.has_meta()); // we're dropping the metadata
             // Just make this an efficient immediate.
+            assert!(!meta.has_meta()); // we're dropping the metadata
+            // Make sure the machine knows this "write" is happening. (This is important so that
+            // races involving local variable allocation can be detected by Miri.)
+            M::after_local_write(self, local, /*storage_live*/ true)?;
             // Note that not calling `layout_of` here does have one real consequence:
             // if the type is too big, we'll only notice this when the local is actually initialized,
             // which is a bit too late -- we should ideally notice this already here, when the memory

View file

@@ -255,11 +255,9 @@ impl<'tcx> Visitor<'tcx> for LintLevelsBuilder<'_, LintLevelQueryMap<'tcx>> {
         intravisit::walk_foreign_item(self, it);
     }

-    fn visit_stmt(&mut self, e: &'tcx hir::Stmt<'tcx>) {
-        // We will call `add_id` when we walk
-        // the `StmtKind`. The outer statement itself doesn't
-        // define the lint levels.
-        intravisit::walk_stmt(self, e);
+    fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) {
+        self.add_id(s.hir_id);
+        intravisit::walk_stmt(self, s);
     }

     fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
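
The practical effect of calling `add_id` on the statement itself (exercised by the regression test added later in this commit) is that a lint-level attribute placed directly on a statement now takes effect. A minimal sketch of the now-working pattern:

#[must_use]
fn must_use_result() -> i32 { 42 }

fn main() {
    // Before this fix, the attribute on the statement node was ignored and
    // `unused_must_use` fired anyway; now the override is honored.
    #[allow(unused_must_use)]
    must_use_result();
}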

View file

@@ -942,3 +942,6 @@
 # Copy the linker, DLLs, and various libraries from MinGW into the Rust toolchain.
 # Only applies when the host or target is pc-windows-gnu.
 #include-mingw-linker = true
+
+# Whether to vendor dependencies for the dist tarball.
+#vendor = if "is a tarball source" || "is a git repository" { true } else { false }

View file

@@ -660,6 +660,56 @@ impl String {
         Cow::Owned(res)
     }

+    /// Converts a [`Vec<u8>`] to a `String`, substituting invalid UTF-8
+    /// sequences with replacement characters.
+    ///
+    /// See [`from_utf8_lossy`] for more details.
+    ///
+    /// [`from_utf8_lossy`]: String::from_utf8_lossy
+    ///
+    /// Note that this function does not guarantee reuse of the original `Vec`
+    /// allocation.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(string_from_utf8_lossy_owned)]
+    /// // some bytes, in a vector
+    /// let sparkle_heart = vec![240, 159, 146, 150];
+    ///
+    /// let sparkle_heart = String::from_utf8_lossy_owned(sparkle_heart);
+    ///
+    /// assert_eq!(String::from("💖"), sparkle_heart);
+    /// ```
+    ///
+    /// Incorrect bytes:
+    ///
+    /// ```
+    /// #![feature(string_from_utf8_lossy_owned)]
+    /// // some invalid bytes
+    /// let input: Vec<u8> = b"Hello \xF0\x90\x80World".into();
+    /// let output = String::from_utf8_lossy_owned(input);
+    ///
+    /// assert_eq!(String::from("Hello �World"), output);
+    /// ```
+    #[must_use]
+    #[cfg(not(no_global_oom_handling))]
+    #[unstable(feature = "string_from_utf8_lossy_owned", issue = "129436")]
+    pub fn from_utf8_lossy_owned(v: Vec<u8>) -> String {
+        if let Cow::Owned(string) = String::from_utf8_lossy(&v) {
+            string
+        } else {
+            // SAFETY: `String::from_utf8_lossy`'s contract ensures that if
+            // it returns a `Cow::Borrowed`, it is a valid UTF-8 string.
+            // Otherwise, it returns a new allocation of an owned `String`, with
+            // replacement characters for invalid sequences, which is returned
+            // above.
+            unsafe { String::from_utf8_unchecked(v) }
+        }
+    }
+
     /// Decode a UTF-16–encoded vector `v` into a `String`, returning [`Err`]
     /// if `v` contains any invalid data.
     ///
@@ -2010,6 +2060,30 @@ impl FromUtf8Error {
         &self.bytes[..]
     }

+    /// Converts the bytes into a `String` lossily, substituting invalid UTF-8
+    /// sequences with replacement characters.
+    ///
+    /// See [`String::from_utf8_lossy`] for more details on replacement of
+    /// invalid sequences, and [`String::from_utf8_lossy_owned`] for the
+    /// `String` function which corresponds to this function.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(string_from_utf8_lossy_owned)]
+    /// // some invalid bytes
+    /// let input: Vec<u8> = b"Hello \xF0\x90\x80World".into();
+    /// let output = String::from_utf8(input).unwrap_or_else(|e| e.into_utf8_lossy());
+    ///
+    /// assert_eq!(String::from("Hello �World"), output);
+    /// ```
+    #[must_use]
+    #[cfg(not(no_global_oom_handling))]
+    #[unstable(feature = "string_from_utf8_lossy_owned", issue = "129436")]
+    pub fn into_utf8_lossy(self) -> String {
+        String::from_utf8_lossy_owned(self.bytes)
+    }
+
     /// Returns the bytes that were attempted to convert to a `String`.
     ///
     /// This method is carefully constructed to avoid allocation. It will

View file

@@ -1011,11 +1011,7 @@ impl Step for PlainSourceTarball {
         write_git_info(builder.rust_info().info(), plain_dst_src);
         write_git_info(builder.cargo_info.info(), &plain_dst_src.join("./src/tools/cargo"));

-        // If we're building from git or tarball sources, we need to vendor
-        // a complete distribution.
-        if builder.rust_info().is_managed_git_subrepository()
-            || builder.rust_info().is_from_tarball()
-        {
+        if builder.config.dist_vendor {
             builder.require_and_update_all_submodules();

             // Vendor all Cargo dependencies

View file

@@ -308,6 +308,7 @@ pub struct Config {
     pub dist_compression_formats: Option<Vec<String>>,
     pub dist_compression_profile: String,
     pub dist_include_mingw_linker: bool,
+    pub dist_vendor: bool,

     // libstd features
     pub backtrace: bool, // support for RUST_BACKTRACE
@@ -933,6 +934,7 @@ define_config! {
         compression_formats: Option<Vec<String>> = "compression-formats",
         compression_profile: Option<String> = "compression-profile",
         include_mingw_linker: Option<bool> = "include-mingw-linker",
+        vendor: Option<bool> = "vendor",
     }
 }
@@ -2028,13 +2030,19 @@ impl Config {
                 compression_formats,
                 compression_profile,
                 include_mingw_linker,
+                vendor,
             } = dist;
             config.dist_sign_folder = sign_folder.map(PathBuf::from);
             config.dist_upload_addr = upload_addr;
             config.dist_compression_formats = compression_formats;
             set(&mut config.dist_compression_profile, compression_profile);
             set(&mut config.rust_dist_src, src_tarball);
-            set(&mut config.dist_include_mingw_linker, include_mingw_linker)
+            set(&mut config.dist_include_mingw_linker, include_mingw_linker);
+            config.dist_vendor = vendor.unwrap_or_else(|| {
+                // If we're building from git or tarball sources, enable it by default.
+                config.rust_info.is_managed_git_subrepository()
+                    || config.rust_info.is_from_tarball()
+            });
         }

         if let Some(r) = rustfmt {

View file

@@ -260,4 +260,9 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[
         severity: ChangeSeverity::Info,
         summary: "'tools' and 'library' profiles switched `download-ci-llvm` option from `if-unchanged` to `true`.",
     },
+    ChangeInfo {
+        change_id: 130110,
+        severity: ChangeSeverity::Info,
+        summary: "New option `dist.vendor` added to control whether bootstrap should vendor dependencies for dist tarball.",
+    },
 ];

View file

@@ -13,6 +13,14 @@ error: this lint expectation is unfulfilled
 LL | #[expect(invalid_nan_comparisons)]
    |          ^^^^^^^^^^^^^^^^^^^^^^^

+error: this lint expectation is unfulfilled
+  --> tests/ui/expect_tool_lint_rfc_2383.rs:36:18
+   |
+LL | #[expect(invalid_nan_comparisons)]
+   |          ^^^^^^^^^^^^^^^^^^^^^^^
+   |
+   = note: duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
+
 error: this lint expectation is unfulfilled
   --> tests/ui/expect_tool_lint_rfc_2383.rs:107:14
    |
@@ -37,5 +45,5 @@ error: this lint expectation is unfulfilled
 LL | #[expect(clippy::overly_complex_bool_expr)]
    |          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-error: aborting due to 6 previous errors
+error: aborting due to 7 previous errors

View file

@@ -47,6 +47,7 @@ use std::{
 };

 use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_index::{Idx, IndexVec};
 use rustc_middle::{mir, ty::Ty};
@@ -1047,32 +1048,31 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (mem_clocks_range, mem_clocks) in
-                alloc_ranges.iter_mut(access_range.start, access_range.size)
-            {
-                if let Err(DataRace) =
-                    mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
-                {
-                    drop(thread_clocks);
-                    // Report data-race.
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaRead(read_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
-            }
-            Ok(())
-        } else {
-            Ok(())
+        if !global.race_detecting() {
+            return Ok(());
         }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (mem_clocks_range, mem_clocks) in
+            alloc_ranges.iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
+            {
+                drop(thread_clocks);
+                // Report data-race.
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaRead(read_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
+            }
+        }
+        Ok(())
     }

     /// Detect data-races for an unsynchronized write operation. It will not perform
@@ -1090,33 +1090,129 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_mut().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            for (mem_clocks_range, mem_clocks) in
-                self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
-            {
-                if let Err(DataRace) = mem_clocks.write_race_detect(
-                    &mut thread_clocks,
-                    index,
-                    write_type,
-                    current_span,
-                ) {
-                    drop(thread_clocks);
-                    // Report data-race
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaWrite(write_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
-            }
-            Ok(())
-        } else {
-            Ok(())
-        }
-    }
-}
+        if !global.race_detecting() {
+            return Ok(());
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        for (mem_clocks_range, mem_clocks) in
+            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
+            {
+                drop(thread_clocks);
+                // Report data-race
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaWrite(write_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
+            }
+        }
+        Ok(())
+    }
+}
+
+/// Vector clock state for a stack frame (tracking the local variables
+/// that do not have an allocation yet).
+#[derive(Debug, Default)]
+pub struct FrameState {
+    local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
+}
+
+/// Stripped-down version of [`MemoryCellClocks`] for the clocks we need to keep track
+/// of in a local that does not yet have addressable memory -- and hence can only
+/// be accessed from the thread its stack frame belongs to, and cannot be accessed atomically.
+#[derive(Debug)]
+struct LocalClocks {
+    write: VTimestamp,
+    write_type: NaWriteType,
+    read: VTimestamp,
+}
+
+impl Default for LocalClocks {
+    fn default() -> Self {
+        Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
+    }
+}
+
+impl FrameState {
+    pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
+        let current_span = machine.current_span();
+        let global = machine.data_race.as_ref().unwrap();
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::write_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        let mut clocks = self.local_clocks.borrow_mut();
+        if storage_live {
+            let new_clocks = LocalClocks {
+                write: thread_clocks.clock[index],
+                write_type: NaWriteType::Allocate,
+                read: VTimestamp::ZERO,
+            };
+            // There might already be an entry in the map for this, if the local was previously
+            // live already.
+            clocks.insert(local, new_clocks);
+        } else {
+            // This can fail to exist if `race_detecting` was false when the allocation
+            // occurred, in which case we can backdate this to the beginning of time.
+            let clocks = clocks.entry(local).or_insert_with(Default::default);
+            clocks.write = thread_clocks.clock[index];
+            clocks.write_type = NaWriteType::Write;
+        }
+    }
+
+    pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
+        let current_span = machine.current_span();
+        let global = machine.data_race.as_ref().unwrap();
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::read_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
+        // This can fail to exist if `race_detecting` was false when the allocation
+        // occurred, in which case we can backdate this to the beginning of time.
+        let mut clocks = self.local_clocks.borrow_mut();
+        let clocks = clocks.entry(local).or_insert_with(Default::default);
+        clocks.read = thread_clocks.clock[index];
+    }
+
+    pub fn local_moved_to_memory(
+        &self,
+        local: mir::Local,
+        alloc: &mut VClockAlloc,
+        machine: &MiriMachine<'_>,
+    ) {
+        let global = machine.data_race.as_ref().unwrap();
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // Get the time the last write actually happened. This can fail to exist if
+        // `race_detecting` was false when the write occurred, in that case we can backdate this
+        // to the beginning of time.
+        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
+        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
+            // The initialization write for this already happened, just at the wrong timestamp.
+            // Check that the thread index matches what we expect.
+            assert_eq!(mem_clocks.write.0, index);
+            // Convert the local's clocks into memory clocks.
+            mem_clocks.write = (index, local_clocks.write);
+            mem_clocks.write_type = local_clocks.write_type;
+            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
+        }
+    }
+}
@@ -1305,69 +1401,67 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
         assert!(access.is_atomic());
-        if let Some(data_race) = &this.machine.data_race {
-            if data_race.race_detecting() {
-                let size = place.layout.size;
-                let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
-                // Load and log the atomic operation.
-                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-                let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
-                trace!(
-                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
-                    access.description(None, None),
-                    &atomic,
-                    place.ptr(),
-                    size.bytes()
-                );
-
-                let current_span = this.machine.current_span();
-                // Perform the atomic operation.
-                data_race.maybe_perform_sync_operation(
-                    &this.machine.threads,
-                    current_span,
-                    |index, mut thread_clocks| {
-                        for (mem_clocks_range, mem_clocks) in
-                            alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
-                        {
-                            if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
-                            {
-                                mem::drop(thread_clocks);
-                                return VClockAlloc::report_data_race(
-                                    data_race,
-                                    &this.machine.threads,
-                                    mem_clocks,
-                                    access,
-                                    place.layout.size,
-                                    interpret::Pointer::new(
-                                        alloc_id,
-                                        Size::from_bytes(mem_clocks_range.start),
-                                    ),
-                                    None,
-                                )
-                                .map(|_| true);
-                            }
-                        }
-                        // This conservatively assumes all operations have release semantics
-                        Ok(true)
-                    },
-                )?;
-
-                // Log changes to atomic memory.
-                if tracing::enabled!(tracing::Level::TRACE) {
-                    for (_offset, mem_clocks) in
-                        alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
-                    {
-                        trace!(
-                            "Updated atomic memory({:?}, size={}) to {:#?}",
-                            place.ptr(),
-                            size.bytes(),
-                            mem_clocks.atomic_ops
-                        );
-                    }
-                }
-            }
-        }
+        let Some(data_race) = &this.machine.data_race else { return Ok(()) };
+        if !data_race.race_detecting() {
+            return Ok(());
+        }
+        let size = place.layout.size;
+        let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
+        // Load and log the atomic operation.
+        // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
+        let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+        trace!(
+            "Atomic op({}) with ordering {:?} on {:?} (size={})",
+            access.description(None, None),
+            &atomic,
+            place.ptr(),
+            size.bytes()
+        );
+
+        let current_span = this.machine.current_span();
+        // Perform the atomic operation.
+        data_race.maybe_perform_sync_operation(
+            &this.machine.threads,
+            current_span,
+            |index, mut thread_clocks| {
+                for (mem_clocks_range, mem_clocks) in
+                    alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
+                {
+                    if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
+                        mem::drop(thread_clocks);
+                        return VClockAlloc::report_data_race(
+                            data_race,
+                            &this.machine.threads,
+                            mem_clocks,
+                            access,
+                            place.layout.size,
+                            interpret::Pointer::new(
+                                alloc_id,
+                                Size::from_bytes(mem_clocks_range.start),
+                            ),
+                            None,
+                        )
+                        .map(|_| true);
+                    }
+                }
+                // This conservatively assumes all operations have release semantics
+                Ok(true)
+            },
+        )?;
+
+        // Log changes to atomic memory.
+        if tracing::enabled!(tracing::Level::TRACE) {
+            for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
+                trace!(
+                    "Updated atomic memory({:?}, size={}) to {:#?}",
+                    place.ptr(),
+                    size.bytes(),
+                    mem_clocks.atomic_ops
+                );
+            }
+        }
         Ok(())
     }
 }

View file

@@ -530,7 +530,9 @@ impl<'tcx> ThreadManager<'tcx> {
     }

     /// Mutably borrow the stack of the active thread.
-    fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
+    pub fn active_thread_stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
         &mut self.threads[self.active_thread].stack
     }

     pub fn all_stacks(
@@ -898,7 +900,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
         alloc.mutability = Mutability::Mut;
         // Create a fresh allocation with this content.
-        let ptr = this.allocate_raw_ptr(alloc, MiriMemoryKind::Tls.into())?;
+        let ptr = this.insert_allocation(alloc, MiriMemoryKind::Tls.into())?;
         this.machine.threads.set_thread_local_alloc(def_id, ptr);
         Ok(ptr)
     }

View file

@@ -130,6 +130,9 @@ impl Ord for VTimestamp {
 /// also this means that there is only one unique valid length
 /// for each set of vector clock values and hence the PartialEq
 /// and Eq derivations are correct.
+///
+/// This means we cannot represent a clock where the last entry is a timestamp-0 read that occurs
+/// because of a retag. That's fine, all it does is risk wrong diagnostics in an extreme corner case.
 #[derive(PartialEq, Eq, Default, Debug)]
 pub struct VClock(SmallVec<[VTimestamp; SMALL_VECTOR]>);

@@ -137,6 +140,9 @@ impl VClock {
     /// Create a new vector-clock containing all zeros except
     /// for a value at the given index
     pub(super) fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
+        if timestamp.time() == 0 {
+            return VClock::default();
+        }
         let len = index.index() + 1;
         let mut vec = smallvec::smallvec![VTimestamp::ZERO; len];
         vec[index.index()] = timestamp;
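
The trimming invariant the early return preserves can be sketched standalone (a hypothetical model using plain vectors, not the SmallVec-based type above): a clock whose only explicit entry is a zero timestamp must use the same representation as the all-zero clock, or the derived equality would be wrong.

// Model: a vector clock is a list of timestamps with no meaningful trailing zeros.
fn new_with_index(index: usize, timestamp: u64) -> Vec<u64> {
    if timestamp == 0 {
        return Vec::new(); // canonical representation of the all-zero clock
    }
    let mut clock = vec![0; index + 1];
    clock[index] = timestamp;
    clock
}

fn main() {
    // Without the early return, these two would compare unequal despite
    // carrying the same ordering information.
    assert_eq!(new_with_index(3, 0), Vec::<u64>::new());
    assert_eq!(new_with_index(1, 7), vec![0, 7]);
}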

View file

@@ -3,11 +3,8 @@
 mod atomic;
 mod simd;

-use std::iter;
-
 use rand::Rng;
 use rustc_apfloat::{Float, Round};
-use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::{
     mir,
     ty::{self, FloatTy},
@@ -119,19 +116,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 this.copy_op(dest, &place)?;
             }

-            "write_bytes" | "volatile_set_memory" => {
+            "volatile_set_memory" => {
                 let [ptr, val_byte, count] = check_arg_count(args)?;
-                let ty = ptr.layout.ty.builtin_deref(true).unwrap();
-                let ty_layout = this.layout_of(ty)?;
-                let val_byte = this.read_scalar(val_byte)?.to_u8()?;
-                let ptr = this.read_pointer(ptr)?;
-                let count = this.read_target_usize(count)?;
-                // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
-                // but no actual allocation can be big enough for the difference to be noticeable.
-                let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
-                    err_ub_format!("overflow computing total size of `{intrinsic_name}`")
-                })?;
-                this.write_bytes_ptr(ptr, iter::repeat(val_byte).take(byte_count.bytes_usize()))?;
+                this.write_bytes_intrinsic(ptr, val_byte, count, "volatile_set_memory")?;
             }

             // Memory model / provenance manipulation

View file

@@ -81,24 +81,42 @@ pub struct FrameExtra<'tcx> {
     /// an additional bit of "salt" into the cache key. This salt is fixed per-frame
     /// so that within a call, a const will have a stable address.
     salt: usize,
+
+    /// Data race detector per-frame data.
+    pub data_race: Option<data_race::FrameState>,
 }

 impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         // Omitting `timing`, it does not support `Debug`.
-        let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant: _, salt: _ } =
-            self;
+        let FrameExtra {
+            borrow_tracker,
+            catch_unwind,
+            timing: _,
+            is_user_relevant,
+            salt,
+            data_race,
+        } = self;
         f.debug_struct("FrameData")
             .field("borrow_tracker", borrow_tracker)
             .field("catch_unwind", catch_unwind)
+            .field("is_user_relevant", is_user_relevant)
+            .field("salt", salt)
+            .field("data_race", data_race)
             .finish()
     }
 }

 impl VisitProvenance for FrameExtra<'_> {
     fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
-        let FrameExtra { catch_unwind, borrow_tracker, timing: _, is_user_relevant: _, salt: _ } =
-            self;
+        let FrameExtra {
+            catch_unwind,
+            borrow_tracker,
+            timing: _,
+            is_user_relevant: _,
+            salt: _,
+            data_race: _,
+        } = self;

         catch_unwind.visit_provenance(visit);
         borrow_tracker.visit_provenance(visit);
@@ -1446,6 +1464,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
             timing,
             is_user_relevant: ecx.machine.is_user_relevant(&frame),
             salt: ecx.machine.rng.borrow_mut().gen::<usize>() % ADDRS_PER_ANON_GLOBAL,
+            data_race: ecx.machine.data_race.as_ref().map(|_| data_race::FrameState::default()),
         };

         Ok(frame.with_extra(extra))
@@ -1551,7 +1570,25 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
         res
     }

-    fn after_local_allocated(
+    fn after_local_read(ecx: &InterpCx<'tcx, Self>, local: mir::Local) -> InterpResult<'tcx> {
+        if let Some(data_race) = &ecx.frame().extra.data_race {
+            data_race.local_read(local, &ecx.machine);
+        }
+        Ok(())
+    }
+
+    fn after_local_write(
+        ecx: &mut InterpCx<'tcx, Self>,
+        local: mir::Local,
+        storage_live: bool,
+    ) -> InterpResult<'tcx> {
+        if let Some(data_race) = &ecx.frame().extra.data_race {
+            data_race.local_write(local, storage_live, &ecx.machine);
+        }
+        Ok(())
+    }
+
+    fn after_local_moved_to_memory(
         ecx: &mut InterpCx<'tcx, Self>,
         local: mir::Local,
         mplace: &MPlaceTy<'tcx>,
@@ -1559,9 +1596,17 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
         let Some(Provenance::Concrete { alloc_id, .. }) = mplace.ptr().provenance else {
             panic!("after_local_allocated should only be called on fresh allocations");
         };
+        // Record the span where this was allocated: the declaration of the local.
         let local_decl = &ecx.frame().body().local_decls[local];
         let span = local_decl.source_info.span;
         ecx.machine.allocation_spans.borrow_mut().insert(alloc_id, (span, None));
+        // The data race system has to fix the clocks used for this write.
+        let (alloc_info, machine) = ecx.get_alloc_extra_mut(alloc_id)?;
+        if let Some(data_race) =
+            &machine.threads.active_thread_stack().last().unwrap().extra.data_race
+        {
+            data_race.local_moved_to_memory(local, alloc_info.data_race.as_mut().unwrap(), machine);
+        }
         Ok(())
     }

View file

@@ -196,7 +196,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         if size == 0 {
             throw_ub_format!("creating allocation with size 0");
         }
-        if i128::from(size) > this.tcx.data_layout.pointer_size.signed_int_max() {
+        if size > this.max_size_of_val().bytes() {
             throw_ub_format!("creating an allocation larger than half the address space");
         }
         if let Err(e) = Align::from_bytes(align) {
@@ -441,19 +441,34 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
             "malloc" => {
                 let [size] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
                 let size = this.read_target_usize(size)?;
-                let res = this.malloc(size, /*zero_init:*/ false)?;
-                this.write_pointer(res, dest)?;
+                if size <= this.max_size_of_val().bytes() {
+                    let res = this.malloc(size, /*zero_init:*/ false)?;
+                    this.write_pointer(res, dest)?;
+                } else {
+                    // If this does not fit in an isize, return null and, on Unix, set errno.
+                    if this.target_os_is_unix() {
+                        let einval = this.eval_libc("ENOMEM");
+                        this.set_last_error(einval)?;
+                    }
+                    this.write_null(dest)?;
+                }
             }
             "calloc" => {
-                let [items, len] =
+                let [items, elem_size] =
                     this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
                 let items = this.read_target_usize(items)?;
-                let len = this.read_target_usize(len)?;
-                let size = items
-                    .checked_mul(len)
-                    .ok_or_else(|| err_ub_format!("overflow during calloc size computation"))?;
-                let res = this.malloc(size, /*zero_init:*/ true)?;
-                this.write_pointer(res, dest)?;
+                let elem_size = this.read_target_usize(elem_size)?;
+                if let Some(size) = this.compute_size_in_bytes(Size::from_bytes(elem_size), items) {
+                    let res = this.malloc(size.bytes(), /*zero_init:*/ true)?;
+                    this.write_pointer(res, dest)?;
+                } else {
+                    // On size overflow, return null and, on Unix, set errno.
+                    if this.target_os_is_unix() {
+                        let einval = this.eval_libc("ENOMEM");
+                        this.set_last_error(einval)?;
+                    }
+                    this.write_null(dest)?;
+                }
             }
             "free" => {
                 let [ptr] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
@@ -465,8 +480,17 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
                     this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
                 let old_ptr = this.read_pointer(old_ptr)?;
                 let new_size = this.read_target_usize(new_size)?;
-                let res = this.realloc(old_ptr, new_size)?;
-                this.write_pointer(res, dest)?;
+                if new_size <= this.max_size_of_val().bytes() {
+                    let res = this.realloc(old_ptr, new_size)?;
+                    this.write_pointer(res, dest)?;
+                } else {
+                    // If this does not fit in an isize, return null and, on Unix, set errno.
+                    if this.target_os_is_unix() {
+                        let einval = this.eval_libc("ENOMEM");
+                        this.set_last_error(einval)?;
+                    }
+                    this.write_null(dest)?;
+                }
             }

             // Rust allocation

View file

@@ -363,14 +363,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                 //
                 // Linux: https://www.unix.com/man-page/linux/3/reallocarray/
                 // FreeBSD: https://man.freebsd.org/cgi/man.cgi?query=reallocarray
-                match nmemb.checked_mul(size) {
+                match this.compute_size_in_bytes(Size::from_bytes(size), nmemb) {
                     None => {
                         let einval = this.eval_libc("ENOMEM");
                         this.set_last_error(einval)?;
                         this.write_null(dest)?;
                     }
                     Some(len) => {
-                        let res = this.realloc(ptr, len)?;
+                        let res = this.realloc(ptr, len.bytes())?;
                         this.write_pointer(res, dest)?;
                     }
                 }

View file

@@ -0,0 +1,57 @@
+//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation
+#![feature(core_intrinsics)]
+#![feature(custom_mir)]
+
+use std::intrinsics::mir::*;
+use std::sync::atomic::Ordering::*;
+use std::sync::atomic::*;
+use std::thread::JoinHandle;
+
+static P: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
+
+fn spawn_thread() -> JoinHandle<()> {
+    std::thread::spawn(|| {
+        while P.load(Relaxed).is_null() {
+            std::hint::spin_loop();
+        }
+        unsafe {
+            // Initialize `*P`.
+            let ptr = P.load(Relaxed);
+            *ptr = 127;
+            //~^ ERROR: Data race detected between (1) creating a new allocation on thread `main` and (2) non-atomic write on thread `unnamed-1`
+        }
+    })
+}
+
+fn finish(t: JoinHandle<()>, val_ptr: *mut u8) {
+    P.store(val_ptr, Relaxed);
+
+    // Wait for the thread to be done.
+    t.join().unwrap();
+
+    // Read initialized value.
+    assert_eq!(unsafe { *val_ptr }, 127);
+}
+
+#[custom_mir(dialect = "runtime", phase = "optimized")]
+fn main() {
+    mir! {
+        let t;
+        let val;
+        let val_ptr;
+        let _ret;
+        {
+            Call(t = spawn_thread(), ReturnTo(after_spawn), UnwindContinue())
+        }
+        after_spawn = {
+            // This races with the write in the other thread.
+            StorageLive(val);
+            val_ptr = &raw mut val;
+            Call(_ret = finish(t, val_ptr), ReturnTo(done), UnwindContinue())
+        }
+        done = {
+            Return()
+        }
+    }
+}

View file

@@ -0,0 +1,20 @@
+error: Undefined Behavior: Data race detected between (1) creating a new allocation on thread `main` and (2) non-atomic write on thread `unnamed-ID` at ALLOC. (2) just happened here
+  --> $DIR/local_variable_alloc_race.rs:LL:CC
+   |
+LL | *ptr = 127;
+   | ^^^^^^^^^^ Data race detected between (1) creating a new allocation on thread `main` and (2) non-atomic write on thread `unnamed-ID` at ALLOC. (2) just happened here
+   |
+help: and (1) occurred earlier here
+  --> $DIR/local_variable_alloc_race.rs:LL:CC
+   |
+LL | StorageLive(val);
+   | ^^^^^^^^^^^^^^^^
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE (of the first span) on thread `unnamed-ID`:
+   = note: inside closure at $DIR/local_variable_alloc_race.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error

View file

@@ -0,0 +1,38 @@
+//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation
+
+use std::sync::atomic::Ordering::*;
+use std::sync::atomic::*;
+
+static P: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
+
+fn main() {
+    // Create the local variable, and initialize it.
+    let mut val: u8 = 0;
+
+    let t1 = std::thread::spawn(|| {
+        while P.load(Relaxed).is_null() {
+            std::hint::spin_loop();
+        }
+        unsafe {
+            // Initialize `*P`.
+            let ptr = P.load(Relaxed);
+            *ptr = 127;
+            //~^ ERROR: Data race detected between (1) non-atomic read on thread `main` and (2) non-atomic write on thread `unnamed-1`
+        }
+    });
+
+    // This read is not ordered with the store above, and thus should be reported as a race.
+    let _val = val;
+
+    // Actually generate memory for the local variable.
+    // This is the time its value is actually written to memory.
+    // If we just "pre-date" the write to the beginning of time (since we don't know
+    // when it actually happened), we'd miss the UB in this test.
+    // Also, the UB error should point at the write above, not the addr-of here.
+    P.store(std::ptr::addr_of_mut!(val), Relaxed);
+
+    // Wait for the thread to be done.
+    t1.join().unwrap();
+
+    // Read initialized value.
+    assert_eq!(val, 127);
+}

View file

@@ -0,0 +1,20 @@
+error: Undefined Behavior: Data race detected between (1) non-atomic read on thread `main` and (2) non-atomic write on thread `unnamed-ID` at ALLOC. (2) just happened here
+  --> $DIR/local_variable_read_race.rs:LL:CC
+   |
+LL | *ptr = 127;
+   | ^^^^^^^^^^ Data race detected between (1) non-atomic read on thread `main` and (2) non-atomic write on thread `unnamed-ID` at ALLOC. (2) just happened here
+   |
+help: and (1) occurred earlier here
+  --> $DIR/local_variable_read_race.rs:LL:CC
+   |
+LL | let _val = val;
+   |            ^^^
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE (of the first span) on thread `unnamed-ID`:
+   = note: inside closure at $DIR/local_variable_read_race.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error

View file

@@ -0,0 +1,37 @@
+//@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation
+
+use std::sync::atomic::Ordering::*;
+use std::sync::atomic::*;
+
+static P: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
+
+fn main() {
+    let t1 = std::thread::spawn(|| {
+        while P.load(Relaxed).is_null() {
+            std::hint::spin_loop();
+        }
+        unsafe {
+            // Initialize `*P`.
+            let ptr = P.load(Relaxed);
+            *ptr = 127;
+            //~^ ERROR: Data race detected between (1) non-atomic write on thread `main` and (2) non-atomic write on thread `unnamed-1`
+        }
+    });
+
+    // Create the local variable, and initialize it.
+    // This is not ordered with the store above, so it's definitely UB
+    // for that thread to access this variable.
+    let mut val: u8 = 0;
+
+    // Actually generate memory for the local variable.
+    // This is the time its value is actually written to memory.
+    // If we just "pre-date" the write to the beginning of time (since we don't know
+    // when it actually happened), we'd miss the UB in this test.
+    // Also, the UB error should point at the write above, not the addr-of here.
+    P.store(std::ptr::addr_of_mut!(val), Relaxed);
+
+    // Wait for the thread to be done.
+    t1.join().unwrap();
+
+    // Read initialized value.
+    assert_eq!(val, 127);
+}

View file

@@ -0,0 +1,20 @@
+error: Undefined Behavior: Data race detected between (1) non-atomic write on thread `main` and (2) non-atomic write on thread `unnamed-ID` at ALLOC. (2) just happened here
+  --> $DIR/local_variable_write_race.rs:LL:CC
+   |
+LL | *ptr = 127;
+   | ^^^^^^^^^^ Data race detected between (1) non-atomic write on thread `main` and (2) non-atomic write on thread `unnamed-ID` at ALLOC. (2) just happened here
+   |
+help: and (1) occurred earlier here
+  --> $DIR/local_variable_write_race.rs:LL:CC
+   |
+LL | let mut val: u8 = 0;
+   |     ^
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE (of the first span) on thread `unnamed-ID`:
+   = note: inside closure at $DIR/local_variable_write_race.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error

View file

@@ -101,6 +101,10 @@ fn test_malloc() {
         let slice = slice::from_raw_parts(p3 as *const u8, 20);
         assert_eq!(&slice, &[0_u8; 20]);

+        // new size way too big (so this doesn't actually realloc).
+        let p_too_big = libc::realloc(p3, usize::MAX);
+        assert!(p_too_big.is_null());
+
         // old size > new size
         let p4 = libc::realloc(p3, 10);
         let slice = slice::from_raw_parts(p4 as *const u8, 10);
@@ -119,9 +123,13 @@ fn test_malloc() {
     unsafe {
         let p1 = libc::realloc(ptr::null_mut(), 20);
         assert!(!p1.is_null());
         libc::free(p1);
     }
+
+    unsafe {
+        let p_too_big = libc::malloc(usize::MAX);
+        assert!(p_too_big.is_null());
+    }
 }

 fn test_calloc() {
@@ -143,6 +151,9 @@ fn test_calloc() {
         let slice = slice::from_raw_parts(p4 as *const u8, 4 * 8);
         assert_eq!(&slice, &[0_u8; 4 * 8]);
         libc::free(p4);
+
+        let p_too_big = libc::calloc(usize::MAX / 4, 4);
+        assert!(p_too_big.is_null());
     }
 }
} }

View file

@@ -1,6 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0

-use std::sync::atomic::{fence, AtomicUsize, Ordering};
+use std::sync::atomic::*;
 use std::thread::spawn;

 #[derive(Copy, Clone)]
@@ -112,9 +112,41 @@ pub fn test_simple_release() {
     }
 }

+fn test_local_variable_lazy_write() {
+    static P: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
+
+    // Create the local variable, and initialize it.
+    // This write happens before the thread is spawned, so there is no data race.
+    let mut val: u8 = 0;
+    let t1 = std::thread::spawn(|| {
+        while P.load(Ordering::Relaxed).is_null() {
+            std::hint::spin_loop();
+        }
+        unsafe {
+            // Initialize `*P`.
+            let ptr = P.load(Ordering::Relaxed);
+            *ptr = 127;
+        }
+    });
+
+    // Actually generate memory for the local variable.
+    // This is the time its value is actually written to memory:
+    // that's *after* the thread above was spawned!
+    // This may hence look like a data race wrt the access in the thread above.
+    P.store(std::ptr::addr_of_mut!(val), Ordering::Relaxed);
+
+    // Wait for the thread to be done.
+    t1.join().unwrap();
+
+    // Read initialized value.
+    assert_eq!(val, 127);
+}
+
 pub fn main() {
     test_fence_sync();
     test_multiple_reads();
     test_rmw_no_block();
     test_simple_release();
+    test_local_variable_lazy_write();
 }

View file

@@ -0,0 +1,13 @@
+const C: () = {
+    let value = [1, 2];
+    let ptr = value.as_ptr().wrapping_add(2);
+    let fat = std::ptr::slice_from_raw_parts(ptr, usize::MAX);
+    unsafe {
+        // This used to ICE, but it should just report UB.
+        let _ice = (*fat)[usize::MAX - 1];
+        //~^ERROR: constant value failed
+        //~| overflow
+    }
+};
+
+fn main() {}

View file

@@ -0,0 +1,9 @@
+error[E0080]: evaluation of constant value failed
+  --> $DIR/slice-index-overflow-issue-130284.rs:7:20
+   |
+LL | let _ice = (*fat)[usize::MAX - 1];
+   |            ^^^^^^^^^^^^^^^^^^^^^^ overflowing pointer arithmetic: the total offset in bytes does not fit in an `isize`
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0080`.

View file

@@ -0,0 +1,19 @@
+// Regression test for issue #130142
+
+// Checks that we emit no warnings when a lint's level
+// is overridden by an expect or allow attr on a Stmt node
+
+//@ check-pass
+
+#[must_use]
+pub fn must_use_result() -> i32 {
+    42
+}
+
+fn main() {
+    #[expect(unused_must_use)]
+    must_use_result();
+
+    #[allow(unused_must_use)]
+    must_use_result();
+}

View file

@@ -37,6 +37,8 @@ mod rustc_warn {
         #[expect(invalid_nan_comparisons)]
         //~^ WARNING this lint expectation is unfulfilled [unfulfilled_lint_expectations]
+        //~| WARNING this lint expectation is unfulfilled [unfulfilled_lint_expectations]
+        //~| NOTE duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
         let _b = x == 5;
     }
 }

View file

@@ -12,5 +12,13 @@ warning: this lint expectation is unfulfilled
 LL | #[expect(invalid_nan_comparisons)]
    |          ^^^^^^^^^^^^^^^^^^^^^^^

-warning: 2 warnings emitted
+warning: this lint expectation is unfulfilled
+  --> $DIR/expect_tool_lint_rfc_2383.rs:38:18
+   |
+LL | #[expect(invalid_nan_comparisons)]
+   |          ^^^^^^^^^^^^^^^^^^^^^^^
+   |
+   = note: duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
+
+warning: 3 warnings emitted

View file

@@ -16,15 +16,22 @@
 pub fn normal_test_fn() {
     #[expect(unused_mut, reason = "this expectation will create a diagnostic with the default lint level")]
     //~^ WARNING this lint expectation is unfulfilled
+    //~| WARNING this lint expectation is unfulfilled
     //~| NOTE this expectation will create a diagnostic with the default lint level
+    //~| NOTE this expectation will create a diagnostic with the default lint level
+    //~| NOTE duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
     let mut v = vec![1, 1, 2, 3, 5];
     v.sort();

     // Check that lint lists including `unfulfilled_lint_expectations` are also handled correctly
     #[expect(unused, unfulfilled_lint_expectations, reason = "the expectation for `unused` should be fulfilled")]
     //~^ WARNING this lint expectation is unfulfilled
+    //~| WARNING this lint expectation is unfulfilled
+    //~| NOTE the expectation for `unused` should be fulfilled
     //~| NOTE the expectation for `unused` should be fulfilled
     //~| NOTE the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+    //~| NOTE the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+    //~| NOTE duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
     let value = "I'm unused";
 }

View file

@@ -26,7 +26,16 @@ LL | #[expect(unused_mut, reason = "this expectation will create a diagnosti
    = note: this expectation will create a diagnostic with the default lint level

 warning: this lint expectation is unfulfilled
-  --> $DIR/expect_unfulfilled_expectation.rs:24:22
+  --> $DIR/expect_unfulfilled_expectation.rs:17:14
+   |
+LL | #[expect(unused_mut, reason = "this expectation will create a diagnostic with the default lint level")]
+   |          ^^^^^^^^^^
+   |
+   = note: this expectation will create a diagnostic with the default lint level
+   = note: duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
+
+warning: this lint expectation is unfulfilled
+  --> $DIR/expect_unfulfilled_expectation.rs:27:22
    |
 LL | #[expect(unused, unfulfilled_lint_expectations, reason = "the expectation for `unused` should be fulfilled")]
    |          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -34,5 +43,15 @@ LL | #[expect(unused, unfulfilled_lint_expectations, reason = "the expectati
    = note: the expectation for `unused` should be fulfilled
    = note: the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message

-warning: 4 warnings emitted
+warning: this lint expectation is unfulfilled
+  --> $DIR/expect_unfulfilled_expectation.rs:27:22
+   |
+LL | #[expect(unused, unfulfilled_lint_expectations, reason = "the expectation for `unused` should be fulfilled")]
+   |          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |
+   = note: the expectation for `unused` should be fulfilled
+   = note: the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+   = note: duplicate diagnostic emitted due to `-Z deduplicate-diagnostics=no`
+
+warning: 6 warnings emitted