Auto merge of #83698 - erikdesjardins:undefconst, r=RalfJung,oli-obk
Use undef for uninitialized bytes in constants

Fixes #83657

This generates good code when the const is fully uninit, e.g.

```rust
use std::mem::MaybeUninit;

#[no_mangle]
pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> {
    const M: MaybeUninit<[u8; 10]> = MaybeUninit::uninit();
    M
}
```

generates

```asm
fully_uninit:
        ret
```

as you would expect.

There is no improvement, however, when it's partially uninit, e.g.

```rust
use std::mem::MaybeUninit;

pub struct PartiallyUninit {
    x: u64,
    y: MaybeUninit<[u8; 10]>,
}

#[no_mangle]
pub const fn partially_uninit() -> PartiallyUninit {
    const X: PartiallyUninit = PartiallyUninit { x: 0xdeadbeefcafe, y: MaybeUninit::uninit() };
    X
}
```

generates

```asm
partially_uninit:
        mov     rax, rdi
        mov     rcx, qword ptr [rip + .L__unnamed_1+16]
        mov     qword ptr [rdi + 16], rcx
        movups  xmm0, xmmword ptr [rip + .L__unnamed_1]
        movups  xmmword ptr [rdi], xmm0
        ret

.L__unnamed_1:
        .asciz  "\376\312\357\276\255\336\000"
        .zero   16
        .size   .L__unnamed_1, 24
```

which copies a bunch of zeros in place of the undef bytes, the same as before this change.

Edit: generating partially-undef constants isn't viable at the moment anyway, due to #84565, so it is disabled by default behind the new `-Z partially_uninit_const_threshold` flag.
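A quick way to observe the new behavior, assuming a nightly toolchain that includes this change (the `-Z` flag in the comment is the one added by this PR; the file name is arbitrary):

```rust
// Build with, e.g.:
//   rustc --emit=llvm-ir -O -Z partially_uninit_const_threshold=1024 probe.rs
// then look for `undef` in the emitted constant initializers.
#![crate_type = "lib"]

use std::mem::MaybeUninit;

#[no_mangle]
pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> {
    // With this change, the backing constant should be entirely `undef`.
    const M: MaybeUninit<[u8; 10]> = MaybeUninit::uninit();
    M
}
```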
Commit 20997f6ad8
9 changed files with 615 additions and 173 deletions
compiler/rustc_codegen_llvm/src/consts.rs:

```diff
@@ -11,7 +11,8 @@ use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::interpret::{
-    read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer, Scalar as InterpScalar,
+    read_target_uint, Allocation, ErrorHandled, GlobalAlloc, InitChunk, Pointer,
+    Scalar as InterpScalar,
 };
 use rustc_middle::mir::mono::MonoItem;
 use rustc_middle::ty::{self, Instance, Ty};
@@ -19,6 +20,7 @@ use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{
     AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size, WrappingRange,
 };
+use std::ops::Range;
 use tracing::debug;

 pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
```
```diff
@@ -26,6 +28,57 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size.bytes() as usize;

+    // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`,
+    // so `range` must be within the bounds of `alloc` and not contain or overlap a relocation.
+    fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
+        llvals: &mut Vec<&'ll Value>,
+        cx: &'a CodegenCx<'ll, 'b>,
+        alloc: &'a Allocation,
+        range: Range<usize>,
+    ) {
+        let mut chunks = alloc
+            .init_mask()
+            .range_as_init_chunks(Size::from_bytes(range.start), Size::from_bytes(range.end));
+
+        let chunk_to_llval = move |chunk| match chunk {
+            InitChunk::Init(range) => {
+                let range = (range.start.bytes() as usize)..(range.end.bytes() as usize);
+                let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+                cx.const_bytes(bytes)
+            }
+            InitChunk::Uninit(range) => {
+                let len = range.end.bytes() - range.start.bytes();
+                cx.const_undef(cx.type_array(cx.type_i8(), len))
+            }
+        };
+
+        // Generating partially-uninit consts inhibits optimizations, so it is disabled by default.
+        // See https://github.com/rust-lang/rust/issues/84565.
+        let allow_partially_uninit =
+            match cx.sess().opts.debugging_opts.partially_uninit_const_threshold {
+                Some(max) => range.len() <= max,
+                None => false,
+            };
+
+        if allow_partially_uninit {
+            llvals.extend(chunks.map(chunk_to_llval));
+        } else {
+            let llval = match (chunks.next(), chunks.next()) {
+                (Some(chunk), None) => {
+                    // exactly one chunk, either fully init or fully uninit
+                    chunk_to_llval(chunk)
+                }
+                _ => {
+                    // partially uninit, codegen as if it was initialized
+                    // (using some arbitrary value for uninit bytes)
+                    let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+                    cx.const_bytes(bytes)
+                }
+            };
+            llvals.push(llval);
+        }
+    }
+
     let mut next_offset = 0;
     for &(offset, alloc_id) in alloc.relocations().iter() {
         let offset = offset.bytes();
```
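The threshold gate in `append_chunks_of_init_and_uninit_bytes` above is the heart of the change: a single chunk (fully init or fully uninit) is always lowered faithfully, while mixed allocations fall back to plain initialized bytes unless `-Z partially_uninit_const_threshold` permits them. A self-contained sketch of that decision logic, with strings standing in for LLVM constants (all names here are invented for illustration):

```rust
/// `chunks` holds (is_init, length) runs, as they would come out of the init mask.
fn lower_chunks(chunks: &[(bool, usize)], total_len: usize, threshold: Option<usize>) -> Vec<String> {
    // Mirrors `allow_partially_uninit`: the flag is off (None) by default.
    let allow_partially_uninit = match threshold {
        Some(max) => total_len <= max,
        None => false,
    };
    let lower = |&(is_init, len): &(bool, usize)| {
        if is_init { format!("[{} x i8] c\"...\"", len) } else { format!("[{} x i8] undef", len) }
    };
    if allow_partially_uninit || chunks.len() == 1 {
        // Exactly one chunk, or mixed-but-allowed: emit one constant per chunk.
        chunks.iter().map(lower).collect()
    } else {
        // Mixed and not allowed: codegen as if fully initialized.
        vec![format!("[{} x i8] c\"...\"", total_len)]
    }
}

fn main() {
    // Fully uninit lowers to a single `undef` even with the flag unset.
    assert_eq!(lower_chunks(&[(false, 10)], 10, None), vec!["[10 x i8] undef".to_string()]);
    // Partially uninit stays plain bytes unless the threshold allows it.
    let mixed = [(true, 4), (false, 12)];
    assert_eq!(lower_chunks(&mixed, 16, None).len(), 1);
    assert_eq!(lower_chunks(&mixed, 16, Some(1024)).len(), 2);
}
```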
```diff
@@ -34,12 +87,8 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
         if offset > next_offset {
             // This `inspect` is okay since we have checked that it is not within a relocation, it
             // is within the bounds of the allocation, and it doesn't affect interpreter execution
-            // (we inspect the result after interpreter execution). Any undef byte is replaced with
-            // some arbitrary byte value.
-            //
-            // FIXME: relay undef bytes to codegen as undef const bytes
-            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
-            llvals.push(cx.const_bytes(bytes));
+            // (we inspect the result after interpreter execution).
+            append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
         }
         let ptr_offset = read_target_uint(
             dl.endian,
@@ -70,12 +119,8 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
     let range = next_offset..alloc.len();
     // This `inspect` is okay since we have check that it is after all relocations, it is
     // within the bounds of the allocation, and it doesn't affect interpreter execution (we
-    // inspect the result after interpreter execution). Any undef byte is replaced with some
-    // arbitrary byte value.
-    //
-    // FIXME: relay undef bytes to codegen as undef const bytes
-    let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
-    llvals.push(cx.const_bytes(bytes));
+    // inspect the result after interpreter execution).
+    append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);

     cx.const_struct(&llvals, true)
```
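For context, the loop above slices the allocation into the byte runs between relocations (pointers); only those runs go through the new chunking helper, while each relocation becomes a pointer constant. An illustrative standalone sketch, with an invented `Piece` type in place of LLVM values:

```rust
enum Piece<'a> {
    /// Plain bytes: handled by `append_chunks_of_init_and_uninit_bytes`.
    Bytes(&'a [u8]),
    /// `pointer_size` bytes replaced by a relocated pointer to the given target.
    Pointer(u64),
}

fn split_at_relocations<'a>(
    bytes: &'a [u8],
    relocations: &[(usize, u64)], // (offset, target), sorted by offset
    pointer_size: usize,
) -> Vec<Piece<'a>> {
    let mut pieces = Vec::new();
    let mut next_offset = 0;
    for &(offset, target) in relocations {
        if offset > next_offset {
            pieces.push(Piece::Bytes(&bytes[next_offset..offset]));
        }
        pieces.push(Piece::Pointer(target));
        next_offset = offset + pointer_size;
    }
    if next_offset < bytes.len() {
        pieces.push(Piece::Bytes(&bytes[next_offset..]));
    }
    pieces
}

fn main() {
    let bytes = [0u8; 12];
    // One relocation at offset 4, with 4-byte pointers: bytes 0..4, a pointer, bytes 8..12.
    let pieces = split_at_relocations(&bytes, &[(4, 0xdead)], 4);
    assert_eq!(pieces.len(), 3);
    assert!(matches!(pieces[1], Piece::Pointer(0xdead)));
    assert_eq!(match &pieces[0] { Piece::Bytes(b) => b.len(), _ => 0 }, 4);
}
```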
compiler/rustc_interface/src/tests.rs:

```diff
@@ -743,6 +743,7 @@ fn test_debugging_options_tracking_hash() {
     tracked!(no_profiler_runtime, true);
     tracked!(osx_rpath_install_name, true);
     tracked!(panic_abort_tests, true);
+    tracked!(partially_uninit_const_threshold, Some(123));
     tracked!(plt, Some(true));
     tracked!(polonius, true);
     tracked!(precise_enum_drop_elaboration, false);
```
compiler/rustc_middle/src/mir/interpret/allocation.rs:

```diff
@@ -1,7 +1,7 @@
 //! The virtual memory representation of the MIR interpreter.

 use std::borrow::Cow;
-use std::convert::TryFrom;
+use std::convert::{TryFrom, TryInto};
 use std::iter;
 use std::ops::{Deref, Range};
 use std::ptr;
@@ -495,129 +495,6 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     }
 }

-/// Uninitialized bytes.
-impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
-    /// Checks whether the given range is entirely initialized.
-    ///
-    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
-    /// indexes of the first contiguous uninitialized access.
-    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
-        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
-    }
-
-    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
-    /// error which will report the first range of bytes which is uninitialized.
-    fn check_init(&self, range: AllocRange) -> AllocResult {
-        self.is_init(range).or_else(|idx_range| {
-            Err(AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
-                access_offset: range.start,
-                access_size: range.size,
-                uninit_offset: idx_range.start,
-                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
-            })))
-        })
-    }
-
-    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
-        if range.size.bytes() == 0 {
-            return;
-        }
-        assert!(self.mutability == Mutability::Mut);
-        self.init_mask.set_range(range.start, range.end(), is_init);
-    }
-}
-
-/// Run-length encoding of the uninit mask.
-/// Used to copy parts of a mask multiple times to another allocation.
-pub struct InitMaskCompressed {
-    /// Whether the first range is initialized.
-    initial: bool,
-    /// The lengths of ranges that are run-length encoded.
-    /// The initialization state of the ranges alternate starting with `initial`.
-    ranges: smallvec::SmallVec<[u64; 1]>,
-}
-
-impl InitMaskCompressed {
-    pub fn no_bytes_init(&self) -> bool {
-        // The `ranges` are run-length encoded and of alternating initialization state.
-        // So if `ranges.len() > 1` then the second block is an initialized range.
-        !self.initial && self.ranges.len() == 1
-    }
-}
-
-/// Transferring the initialization mask to other allocations.
-impl<Tag, Extra> Allocation<Tag, Extra> {
-    /// Creates a run-length encoding of the initialization mask.
-    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
-        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
-        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
-        // the source and write it to the destination. Even if we optimized the memory accesses,
-        // we'd be doing all of this `repeat` times.
-        // Therefore we precompute a compressed version of the initialization mask of the source value and
-        // then write it back `repeat` times without computing any more information from the source.
-
-        // A precomputed cache for ranges of initialized / uninitialized bits
-        // 0000010010001110 will become
-        // `[5, 1, 2, 1, 3, 3, 1]`,
-        // where each element toggles the state.
-
-        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
-        let initial = self.init_mask.get(range.start);
-        let mut cur_len = 1;
-        let mut cur = initial;
-
-        for i in 1..range.size.bytes() {
-            // FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
-            if self.init_mask.get(range.start + Size::from_bytes(i)) == cur {
-                cur_len += 1;
-            } else {
-                ranges.push(cur_len);
-                cur_len = 1;
-                cur = !cur;
-            }
-        }
-
-        ranges.push(cur_len);
-
-        InitMaskCompressed { ranges, initial }
-    }
-
-    /// Applies multiple instances of the run-length encoding to the initialization mask.
-    pub fn mark_compressed_init_range(
-        &mut self,
-        defined: &InitMaskCompressed,
-        range: AllocRange,
-        repeat: u64,
-    ) {
-        // An optimization where we can just overwrite an entire range of initialization
-        // bits if they are going to be uniformly `1` or `0`.
-        if defined.ranges.len() <= 1 {
-            self.init_mask.set_range_inbounds(
-                range.start,
-                range.start + range.size * repeat, // `Size` operations
-                defined.initial,
-            );
-            return;
-        }
-
-        for mut j in 0..repeat {
-            j *= range.size.bytes();
-            j += range.start.bytes();
-            let mut cur = defined.initial;
-            for range in &defined.ranges {
-                let old_j = j;
-                j += range;
-                self.init_mask.set_range_inbounds(
-                    Size::from_bytes(old_j),
-                    Size::from_bytes(j),
-                    cur,
-                );
-                cur = !cur;
-            }
-        }
-    }
-}
-
 /// "Relocations" stores the provenance information of pointers stored in memory.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
 pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);
```
```diff
@@ -704,37 +581,30 @@ pub struct InitMask {
 impl InitMask {
     pub const BLOCK_SIZE: u64 = 64;

+    #[inline]
+    fn bit_index(bits: Size) -> (usize, usize) {
+        // BLOCK_SIZE is the number of bits that can fit in a `Block`.
+        // Each bit in a `Block` represents the initialization state of one byte of an allocation,
+        // so we use `.bytes()` here.
+        let bits = bits.bytes();
+        let a = bits / InitMask::BLOCK_SIZE;
+        let b = bits % InitMask::BLOCK_SIZE;
+        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
+    }
+
+    #[inline]
+    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
+        let block = block.try_into().ok().unwrap();
+        let bit = bit.try_into().ok().unwrap();
+        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+    }
+
     pub fn new(size: Size, state: bool) -> Self {
         let mut m = InitMask { blocks: vec![], len: Size::ZERO };
         m.grow(size, state);
         m
     }

-    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
-    ///
-    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
-    /// indexes for the first contiguous span of the uninitialized access.
-    #[inline]
-    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
-        if end > self.len {
-            return Err(self.len..end);
-        }
-
-        // FIXME(oli-obk): optimize this for allocations larger than a block.
-        let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));
-
-        match idx {
-            Some(idx) => {
-                let uninit_end = (idx.bytes()..end.bytes())
-                    .map(Size::from_bytes)
-                    .find(|&i| self.get(i))
-                    .unwrap_or(end);
-                Err(idx..uninit_end)
-            }
-            None => Ok(()),
-        }
-    }
-
     pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
         let len = self.len;
         if end > len {
```
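The two new associated functions convert between byte offsets and `(block, bit)` coordinates in the mask. A quick standalone check of the arithmetic, using the same `BLOCK_SIZE = 64` (indices simplified to `u64` for the sketch):

```rust
const BLOCK_SIZE: u64 = 64;

// Analogue of `InitMask::bit_index`.
fn bit_index(byte: u64) -> (u64, u64) {
    (byte / BLOCK_SIZE, byte % BLOCK_SIZE)
}

// Analogue of `InitMask::size_from_bit_index`.
fn byte_from_bit_index(block: u64, bit: u64) -> u64 {
    block * BLOCK_SIZE + bit
}

fn main() {
    // Byte 200 lives in block 3 at bit 8, since 3 * 64 + 8 = 200.
    assert_eq!(bit_index(200), (3, 8));
    assert_eq!(byte_from_bit_index(3, 8), 200);
}
```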
```diff
@@ -744,8 +614,8 @@ impl InitMask {
     }

     pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
-        let (blocka, bita) = bit_index(start);
-        let (blockb, bitb) = bit_index(end);
+        let (blocka, bita) = Self::bit_index(start);
+        let (blockb, bitb) = Self::bit_index(end);
         if blocka == blockb {
             // First set all bits except the first `bita`,
             // then unset the last `64 - bitb` bits.
@@ -789,13 +659,13 @@ impl InitMask {

     #[inline]
     pub fn get(&self, i: Size) -> bool {
-        let (block, bit) = bit_index(i);
+        let (block, bit) = Self::bit_index(i);
         (self.blocks[block] & (1 << bit)) != 0
     }

     #[inline]
     pub fn set(&mut self, i: Size, new_state: bool) {
-        let (block, bit) = bit_index(i);
+        let (block, bit) = Self::bit_index(i);
         self.set_bit(block, bit, new_state);
     }

```
```diff
@@ -825,12 +695,418 @@ impl InitMask {
         self.len += amount;
         self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
+
+    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to is_init.
+    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
+        /// A fast implementation of `find_bit`,
+        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
+        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
+        ///
+        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
+        /// and with the least significant bit (and lowest block) first:
+        ///
+        ///          00000000|00000000
+        ///          ^      ^ ^      ^
+        ///   index: 0      7 8      15
+        ///
+        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
+        fn find_bit_fast(
+            init_mask: &InitMask,
+            start: Size,
+            end: Size,
+            is_init: bool,
+        ) -> Option<Size> {
+            /// Search one block, returning the index of the first bit equal to `is_init`.
+            fn search_block(
+                bits: Block,
+                block: usize,
+                start_bit: usize,
+                is_init: bool,
+            ) -> Option<Size> {
+                // For the following examples, assume this function was called with:
+                //   bits = 0b00111011
+                //   start_bit = 3
+                //   is_init = false
+                // Note that, for the examples in this function, the most significant bit is written first,
+                // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.
+
+                // Invert bits so we're always looking for the first set bit.
+                //        ! 0b00111011
+                //   bits = 0b11000100
+                let bits = if is_init { bits } else { !bits };
+                // Mask off unused start bits.
+                //          0b11000100
+                //        & 0b11111000
+                //   bits = 0b11000000
+                let bits = bits & (!0 << start_bit);
+                // Find set bit, if any.
+                //   bit = trailing_zeros(0b11000000)
+                //   bit = 6
+                if bits == 0 {
+                    None
+                } else {
+                    let bit = bits.trailing_zeros();
+                    Some(InitMask::size_from_bit_index(block, bit))
+                }
+            }
+
+            if start >= end {
+                return None;
+            }
+
+            // Convert `start` and `end` to block indexes and bit indexes within each block.
+            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
+            //
+            // For example:
+            //
+            //   (a) 00000000|00000000    (b) 00000000|
+            //       ^~~~~~~~~~~^             ^~~~~~~^
+            //       start      end           start  end
+            //
+            // In both cases, the block index of `end` is 1.
+            // But we do want to search block 1 in (a), and we don't in (b).
+            //
+            // We subtract 1 from both end positions to make them inclusive:
+            //
+            //   (a) 00000000|00000000    (b) 00000000|
+            //       ^~~~~~~~~~^              ^~~~~~^
+            //       start     end_inclusive  start end_inclusive
+            //
+            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
+            // This provides the desired behavior of searching blocks 0 and 1 for (a),
+            // and searching only block 0 for (b).
+            // There is no concern of overflows since we checked for `start >= end` above.
+            let (start_block, start_bit) = InitMask::bit_index(start);
+            let end_inclusive = Size::from_bytes(end.bytes() - 1);
+            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+
+            // Handle first block: need to skip `start_bit` bits.
+            //
+            // We need to handle the first block separately,
+            // because there may be bits earlier in the block that should be ignored,
+            // such as the bit marked (1) in this example:
+            //
+            //        (1)
+            //        -|------
+            //   (c)  01000000|00000000|00000001
+            //        ^~~~~~~~~~~~~~~~~~^
+            //        start              end
+            if let Some(i) =
+                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
+            {
+                // If the range is less than a block, we may find a matching bit after `end`.
+                //
+                // For example, we shouldn't successfully find bit (2), because it's after `end`:
+                //
+                //        (2)
+                //        -------|
+                //   (d)  00000001|00000000|00000001
+                //        ^~~~~^
+                //        start end
+                //
+                // An alternative would be to mask off end bits in the same way as we do for start bits,
+                // but performing this check afterwards is faster and simpler to implement.
+                if i < end {
+                    return Some(i);
+                } else {
+                    return None;
+                }
+            }
+
+            // Handle remaining blocks.
+            //
+            // We can skip over an entire block at once if it's all 0s (resp. 1s).
+            // The block marked (3) in this example is the first block that will be handled by this loop,
+            // and it will be skipped for that reason:
+            //
+            //                 (3)
+            //                 --------
+            //   (e)  01000000|00000000|00000001
+            //        ^~~~~~~~~~~~~~~~~~^
+            //        start              end
+            if start_block < end_block_inclusive {
+                // This loop is written in a specific way for performance.
+                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
+                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
+                // because both alternatives result in significantly worse codegen.
+                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
+                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
+                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
+                    .iter()
+                    .zip(start_block + 1..)
+                {
+                    if let Some(i) = search_block(bits, block, 0, is_init) {
+                        // If this is the last block, we may find a matching bit after `end`.
+                        //
+                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
+                        //
+                        //                           (4)
+                        //                    -------|
+                        //   (f)  00000001|00000000|00000001
+                        //        ^~~~~~~~~~~~~~~~~~^
+                        //        start              end
+                        //
+                        // As above with example (d), we could handle the end block separately and mask off end bits,
+                        // but unconditionally searching an entire block at once and performing this check afterwards
+                        // is faster and much simpler to implement.
+                        if i < end {
+                            return Some(i);
+                        } else {
+                            return None;
+                        }
+                    }
+                }
+            }
+
+            None
+        }
+
+        #[cfg_attr(not(debug_assertions), allow(dead_code))]
+        fn find_bit_slow(
+            init_mask: &InitMask,
+            start: Size,
+            end: Size,
+            is_init: bool,
+        ) -> Option<Size> {
+            (start..end).find(|&i| init_mask.get(i) == is_init)
+        }
+
+        let result = find_bit_fast(self, start, end, is_init);
+
+        debug_assert_eq!(
+            result,
+            find_bit_slow(self, start, end, is_init),
+            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
+            start,
+            end,
+            is_init,
+            self
+        );
+
+        result
+    }
 }
-
-#[inline]
-fn bit_index(bits: Size) -> (usize, usize) {
-    let bits = bits.bytes();
-    let a = bits / InitMask::BLOCK_SIZE;
-    let b = bits % InitMask::BLOCK_SIZE;
-    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
-}
```
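The `search_block` helper above reduces the per-block scan to three integer operations: invert (so the search is always for a set bit), mask off the low `start_bit` bits, and count trailing zeros. A minimal standalone version on 8-bit blocks, mirroring the worked example in the comments (the hunk continues below):

```rust
// Illustrative analogue of `search_block` on a u8 "block".
fn search_block_u8(bits: u8, start_bit: u32, is_init: bool) -> Option<u32> {
    // Invert bits so we're always looking for the first set bit.
    let bits = if is_init { bits } else { !bits };
    // Mask off unused start bits.
    let bits = bits & (!0u8 << start_bit);
    // The first set bit, if any, is the answer.
    if bits == 0 { None } else { Some(bits.trailing_zeros()) }
}

fn main() {
    // bits = 0b00111011, start_bit = 3, is_init = false:
    // inverted 0b11000100, masked 0b11000000, first set bit at index 6.
    assert_eq!(search_block_u8(0b0011_1011, 3, false), Some(6));
    // No matching bit: the caller can skip the whole block.
    assert_eq!(search_block_u8(0b1111_1111, 0, false), None);
}
```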
```diff
+/// A contiguous chunk of initialized or uninitialized memory.
+pub enum InitChunk {
+    Init(Range<Size>),
+    Uninit(Range<Size>),
+}
+
+impl InitChunk {
+    #[inline]
+    pub fn is_init(&self) -> bool {
+        match self {
+            Self::Init(_) => true,
+            Self::Uninit(_) => false,
+        }
+    }
+
+    #[inline]
+    pub fn range(&self) -> Range<Size> {
+        match self {
+            Self::Init(r) => r.clone(),
+            Self::Uninit(r) => r.clone(),
+        }
+    }
+}
+
+impl InitMask {
+    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
+    ///
+    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+    /// indexes for the first contiguous span of the uninitialized access.
+    #[inline]
+    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
+        if end > self.len {
+            return Err(self.len..end);
+        }
+
+        let uninit_start = self.find_bit(start, end, false);
+
+        match uninit_start {
+            Some(uninit_start) => {
+                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
+                Err(uninit_start..uninit_end)
+            }
+            None => Ok(()),
+        }
+    }
+
+    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
+    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
+    ///
+    /// The iterator guarantees the following:
+    /// - Chunks are nonempty.
+    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
+    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
+    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
+    #[inline]
+    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
+        assert!(end <= self.len);
+
+        let is_init = if start < end {
+            self.get(start)
+        } else {
+            // `start..end` is empty: there are no chunks, so use some arbitrary value
+            false
+        };
+
+        InitChunkIter { init_mask: self, is_init, start, end }
+    }
+}
+
+/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
+pub struct InitChunkIter<'a> {
+    init_mask: &'a InitMask,
+    /// Whether the next chunk we will return is initialized.
+    /// If there are no more chunks, contains some arbitrary value.
+    is_init: bool,
+    /// The current byte index into `init_mask`.
+    start: Size,
+    /// The end byte index into `init_mask`.
+    end: Size,
+}
+
+impl<'a> Iterator for InitChunkIter<'a> {
+    type Item = InitChunk;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.start >= self.end {
+            return None;
+        }
+
+        let end_of_chunk =
+            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+        let range = self.start..end_of_chunk;
+
+        let ret =
+            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
+
+        self.is_init = !self.is_init;
+        self.start = end_of_chunk;
+
+        ret
+    }
+}
```
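`InitChunkIter::next` works by asking `find_bit` for the next bit of the opposite state and then flipping `is_init`; that is what yields nonempty, adjacent, alternating chunks. A self-contained analogue over a plain `bool` slice (types invented for illustration; the hunk continues below with the re-added `Allocation` impls):

```rust
struct Chunks<'a> {
    mask: &'a [bool],
    start: usize,
}

impl<'a> Iterator for Chunks<'a> {
    /// (is_init, byte range) pairs, like `InitChunk`.
    type Item = (bool, std::ops::Range<usize>);

    fn next(&mut self) -> Option<Self::Item> {
        if self.start >= self.mask.len() {
            return None;
        }
        let is_init = self.mask[self.start];
        // Analogue of `find_bit(start, end, !is_init).unwrap_or(end)`.
        let end = self.mask[self.start..]
            .iter()
            .position(|&b| b != is_init)
            .map_or(self.mask.len(), |i| self.start + i);
        let range = self.start..end;
        self.start = end;
        Some((is_init, range))
    }
}

fn main() {
    let mask = [true, true, false, false, false, true];
    let chunks: Vec<_> = Chunks { mask: &mask, start: 0 }.collect();
    // Nonempty, adjacent, spanning, alternating: as the doc comment promises.
    assert_eq!(chunks, vec![(true, 0..2), (false, 2..5), (true, 5..6)]);
}
```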
```diff
+/// Uninitialized bytes.
+impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
+    /// Checks whether the given range is entirely initialized.
+    ///
+    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
+    /// indexes of the first contiguous uninitialized access.
+    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
+        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
+    }
+
+    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
+    /// error which will report the first range of bytes which is uninitialized.
+    fn check_init(&self, range: AllocRange) -> AllocResult {
+        self.is_init(range).or_else(|idx_range| {
+            Err(AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
+                access_offset: range.start,
+                access_size: range.size,
+                uninit_offset: idx_range.start,
+                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
+            })))
+        })
+    }
+
+    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+        if range.size.bytes() == 0 {
+            return;
+        }
+        assert!(self.mutability == Mutability::Mut);
+        self.init_mask.set_range(range.start, range.end(), is_init);
+    }
+}
+
+/// Run-length encoding of the uninit mask.
+/// Used to copy parts of a mask multiple times to another allocation.
+pub struct InitMaskCompressed {
+    /// Whether the first range is initialized.
+    initial: bool,
+    /// The lengths of ranges that are run-length encoded.
+    /// The initialization state of the ranges alternate starting with `initial`.
+    ranges: smallvec::SmallVec<[u64; 1]>,
+}
+
+impl InitMaskCompressed {
+    pub fn no_bytes_init(&self) -> bool {
+        // The `ranges` are run-length encoded and of alternating initialization state.
+        // So if `ranges.len() > 1` then the second block is an initialized range.
+        !self.initial && self.ranges.len() == 1
+    }
+}
+
+/// Transferring the initialization mask to other allocations.
+impl<Tag, Extra> Allocation<Tag, Extra> {
+    /// Creates a run-length encoding of the initialization mask; panics if range is empty.
+    ///
+    /// This is essentially a more space-efficient version of
+    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
+    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
+        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
+        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
+        // the source and write it to the destination. Even if we optimized the memory accesses,
+        // we'd be doing all of this `repeat` times.
+        // Therefore we precompute a compressed version of the initialization mask of the source value and
+        // then write it back `repeat` times without computing any more information from the source.
+
+        // A precomputed cache for ranges of initialized / uninitialized bits
+        // 0000010010001110 will become
+        // `[5, 1, 2, 1, 3, 3, 1]`,
+        // where each element toggles the state.
+
+        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
+
+        let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();
+
+        let initial = chunks.peek().expect("range should be nonempty").is_init();
+
+        // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
+        for chunk in chunks {
+            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
+            ranges.push(len);
+        }
+
+        InitMaskCompressed { ranges, initial }
+    }
+
+    /// Applies multiple instances of the run-length encoding to the initialization mask.
+    pub fn mark_compressed_init_range(
+        &mut self,
+        defined: &InitMaskCompressed,
+        range: AllocRange,
+        repeat: u64,
+    ) {
+        // An optimization where we can just overwrite an entire range of initialization
+        // bits if they are going to be uniformly `1` or `0`.
+        if defined.ranges.len() <= 1 {
+            self.init_mask.set_range_inbounds(
+                range.start,
+                range.start + range.size * repeat, // `Size` operations
+                defined.initial,
+            );
+            return;
+        }
+
+        for mut j in 0..repeat {
+            j *= range.size.bytes();
+            j += range.start.bytes();
+            let mut cur = defined.initial;
+            for range in &defined.ranges {
+                let old_j = j;
+                j += range;
+                self.init_mask.set_range_inbounds(
+                    Size::from_bytes(old_j),
+                    Size::from_bytes(j),
+                    cur,
+                );
+                cur = !cur;
+            }
+        }
+    }
+}
```
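To make the run-length encoding concrete, here is a minimal standalone version of what the rewritten `compress_uninit_range` computes, reproducing the `0000010010001110` to `[5, 1, 2, 1, 3, 3, 1]` example from the comments:

```rust
/// Returns (initial state, run lengths); panics if `mask` is empty,
/// just as `compress_uninit_range` does for an empty range.
fn compress(mask: &[bool]) -> (bool, Vec<u64>) {
    let initial = mask[0];
    let mut ranges = Vec::new();
    let mut cur = initial;
    let mut cur_len = 0u64;
    for &bit in mask {
        if bit == cur {
            cur_len += 1;
        } else {
            ranges.push(cur_len);
            cur = bit;
            cur_len = 1;
        }
    }
    ranges.push(cur_len);
    (initial, ranges)
}

fn main() {
    // Each element toggles the state, starting from `initial = false`.
    let mask: Vec<bool> = "0000010010001110".chars().map(|c| c == '1').collect();
    assert_eq!(compress(&mask), (false, vec![5, 1, 2, 1, 3, 3, 1]));
}
```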
compiler/rustc_middle/src/mir/interpret/mod.rs:

```diff
@@ -125,7 +125,9 @@ pub use self::error::{

 pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};

-pub use self::allocation::{alloc_range, AllocRange, Allocation, InitMask, Relocations};
+pub use self::allocation::{
+    alloc_range, AllocRange, Allocation, InitChunk, InitChunkIter, InitMask, Relocations,
+};

 pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
```
compiler/rustc_session/src/options.rs:

```diff
@@ -1186,6 +1186,9 @@
         "support compiling tests with panic=abort (default: no)"),
     parse_only: bool = (false, parse_bool, [UNTRACKED],
         "parse only; do not compile, assemble, or link (default: no)"),
+    partially_uninit_const_threshold: Option<usize> = (None, parse_opt_number, [TRACKED],
+        "allow generating const initializers with mixed init/uninit bytes, \
+        and set the maximum total size of a const allocation for which this is allowed (default: never)"),
     perf_stats: bool = (false, parse_bool, [UNTRACKED],
         "print some performance-related statistics (default: no)"),
     plt: Option<bool> = (None, parse_opt_bool, [TRACKED],
```
compiler/rustc_target/src/abi/mod.rs:

```diff
@@ -5,6 +5,7 @@ use crate::spec::Target;

 use std::convert::{TryFrom, TryInto};
 use std::fmt;
+use std::iter::Step;
 use std::num::NonZeroUsize;
 use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};
 use std::str::FromStr;
@@ -440,6 +441,43 @@ impl AddAssign for Size {
     }
 }

+impl Step for Size {
+    #[inline]
+    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+        u64::steps_between(&start.bytes(), &end.bytes())
+    }
+
+    #[inline]
+    fn forward_checked(start: Self, count: usize) -> Option<Self> {
+        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
+    }
+
+    #[inline]
+    fn forward(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::forward(start.bytes(), count))
+    }
+
+    #[inline]
+    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
+    }
+
+    #[inline]
+    fn backward_checked(start: Self, count: usize) -> Option<Self> {
+        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
+    }
+
+    #[inline]
+    fn backward(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::backward(start.bytes(), count))
+    }
+
+    #[inline]
+    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
+    }
+}
+
 /// Alignment of a type in bytes (always a power of two).
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
 #[derive(HashStable_Generic)]
```
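`Size` gets a `Step` impl so that plain ranges like `start..end` can iterate over it, which is what `find_bit_slow` relies on with `(start..end).find(...)`. A minimal analogue with a newtype; note this matches the unstable `Step` trait as it stood when this PR landed (hence the `step_trait` feature gate added below), and the trait's signatures have changed on later nightlies:

```rust
#![feature(step_trait)]

use std::iter::Step;

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Bytes(u64);

// Forward everything to the underlying u64, as the `Size` impl above does.
impl Step for Bytes {
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.0, &end.0)
    }
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.0, count).map(Bytes)
    }
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.0, count).map(Bytes)
    }
}

fn main() {
    // `Bytes` now works directly in ranges, just like `Size` after this change.
    let hits: Vec<u64> = (Bytes(0)..Bytes(4)).map(|b| b.0).collect();
    assert_eq!(hits, vec![0, 1, 2, 3]);
}
```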
compiler/rustc_target/src/lib.rs:

```diff
@@ -14,6 +14,8 @@
 #![feature(associated_type_bounds)]
 #![feature(exhaustive_patterns)]
 #![feature(min_specialization)]
+#![feature(step_trait)]
+#![feature(unchecked_math)]

 use std::path::{Path, PathBuf};
```
src/test/codegen/uninit-consts-allow-partially-uninit.rs (new file, 35 lines):

```rust
// compile-flags: -C no-prepopulate-passes -Z partially_uninit_const_threshold=1024

// Like uninit-consts.rs, but tests that we correctly generate partially-uninit consts
// when the (disabled by default) partially_uninit_const_threshold flag is used.

#![crate_type = "lib"]

use std::mem::MaybeUninit;

pub struct PartiallyUninit {
    x: u32,
    y: MaybeUninit<[u8; 10]>
}

// This should be partially undef.
// CHECK: [[PARTIALLY_UNINIT:@[0-9]+]] = private unnamed_addr constant <{ [4 x i8], [12 x i8] }> <{ [4 x i8] c"\EF\BE\AD\DE", [12 x i8] undef }>, align 4

// This shouldn't contain undef, since it's larger than the 1024 byte limit.
// CHECK: [[UNINIT_PADDING_HUGE:@[0-9]+]] = private unnamed_addr constant <{ [32768 x i8] }> <{ [32768 x i8] c"{{.+}}" }>, align 4

// CHECK-LABEL: @partially_uninit
#[no_mangle]
pub const fn partially_uninit() -> PartiallyUninit {
    const X: PartiallyUninit = PartiallyUninit { x: 0xdeadbeef, y: MaybeUninit::uninit() };
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 4 %1, i8* align 4 getelementptr inbounds (<{ [4 x i8], [12 x i8] }>, <{ [4 x i8], [12 x i8] }>* [[PARTIALLY_UNINIT]], i32 0, i32 0, i32 0), i{{(32|64)}} 16, i1 false)
    X
}

// CHECK-LABEL: @uninit_padding_huge
#[no_mangle]
pub const fn uninit_padding_huge() -> [(u32, u8); 4096] {
    const X: [(u32, u8); 4096] = [(123, 45); 4096];
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 4 %1, i8* align 4 getelementptr inbounds (<{ [32768 x i8] }>, <{ [32768 x i8] }>* [[UNINIT_PADDING_HUGE]], i32 0, i32 0, i32 0), i{{(32|64)}} 32768, i1 false)
    X
}
```
src/test/codegen/uninit-consts.rs (new file, 40 lines):

```rust
// compile-flags: -C no-prepopulate-passes

// Check that we use undef (and not zero) for uninitialized bytes in constants.

#![crate_type = "lib"]

use std::mem::MaybeUninit;

pub struct PartiallyUninit {
    x: u32,
    y: MaybeUninit<[u8; 10]>
}

// CHECK: [[FULLY_UNINIT:@[0-9]+]] = private unnamed_addr constant <{ [10 x i8] }> undef
// CHECK: [[PARTIALLY_UNINIT:@[0-9]+]] = private unnamed_addr constant <{ [16 x i8] }> <{ [16 x i8] c"\EF\BE\AD\DE\00\00\00\00\00\00\00\00\00\00\00\00" }>, align 4
// CHECK: [[FULLY_UNINIT_HUGE:@[0-9]+]] = private unnamed_addr constant <{ [16384 x i8] }> undef

// CHECK-LABEL: @fully_uninit
#[no_mangle]
pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> {
    const M: MaybeUninit<[u8; 10]> = MaybeUninit::uninit();
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 1 %1, i8* align 1 getelementptr inbounds (<{ [10 x i8] }>, <{ [10 x i8] }>* [[FULLY_UNINIT]], i32 0, i32 0, i32 0), i{{(32|64)}} 10, i1 false)
    M
}

// CHECK-LABEL: @partially_uninit
#[no_mangle]
pub const fn partially_uninit() -> PartiallyUninit {
    const X: PartiallyUninit = PartiallyUninit { x: 0xdeadbeef, y: MaybeUninit::uninit() };
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 4 %1, i8* align 4 getelementptr inbounds (<{ [16 x i8] }>, <{ [16 x i8] }>* [[PARTIALLY_UNINIT]], i32 0, i32 0, i32 0), i{{(32|64)}} 16, i1 false)
    X
}

// CHECK-LABEL: @fully_uninit_huge
#[no_mangle]
pub const fn fully_uninit_huge() -> MaybeUninit<[u32; 4096]> {
    const F: MaybeUninit<[u32; 4096]> = MaybeUninit::uninit();
    // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 4 %1, i8* align 4 getelementptr inbounds (<{ [16384 x i8] }>, <{ [16384 x i8] }>* [[FULLY_UNINIT_HUGE]], i32 0, i32 0, i32 0), i{{(32|64)}} 16384, i1 false)
    F
}
```