put code in a more logical order
This commit is contained in:
parent 5bef23d0fa
commit e950f11019
1 changed file with 479 additions and 474 deletions
@@ -495,6 +495,485 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    }
}
/// "Relocations" stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);

impl<Tag> Relocations<Tag> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, Tag>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, Tag)>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: AllocRange,
        dest: Size,
        count: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let size = src.size;
        let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

        for i in 0..count {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // Compute the offset for the current repetition.
                let dest_offset = dest + size * i; // `Size` operations
                (
                    // Shift offsets from the source allocation to the destination allocation.
                    (offset + dest_offset) - src.start, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.0.insert_presorted(relocations.relative_relocations);
    }
}
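
As a standalone illustration of the offset arithmetic in `prepare_relocation_copy`, the sketch below redoes the repeat-and-shift loop over plain `u64` offsets, with a `char` standing in for the relocation tag. The function name and types are hypothetical stand-ins, since `Size`, `SortedMap`, and the allocation types are internal to rustc.

```rust
// A minimal sketch of the repeat-and-shift arithmetic: each repetition `i`
// re-bases every source offset onto `dest + size * i`.
fn shift_relocations(
    relocations: &[(u64, char)], // (absolute offset in source, tag), sorted
    src_start: u64,              // start of the copied source range
    size: u64,                   // length of the source range
    dest: u64,                   // destination offset
    count: u64,                  // number of repetitions
) -> Vec<(u64, char)> {
    let mut out = Vec::with_capacity(relocations.len() * count as usize);
    for i in 0..count {
        let dest_offset = dest + size * i;
        // Re-base: subtract the source start, add the destination offset of this
        // repetition. The output stays sorted because repetitions are appended
        // in increasing address order, which is what `insert_presorted` relies on.
        out.extend(relocations.iter().map(|&(offset, tag)| (offset + dest_offset - src_start, tag)));
    }
    out
}

fn main() {
    // One relocation at source offset 10, range [8, 24), copied twice to offset 100.
    let shifted = shift_relocations(&[(10, 'p')], 8, 16, 100, 2);
    assert_eq!(shifted, vec![(102, 'p'), (118, 'p')]);
}
```
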
////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false`, the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    #[inline]
    fn bit_index(bits: Size) -> (usize, usize) {
        let bits = bits.bytes();
        let a = bits / InitMask::BLOCK_SIZE;
        let b = bits % InitMask::BLOCK_SIZE;
        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
    }

    #[inline]
    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
        let block = block.try_into().ok().unwrap();
        let bit = bit.try_into().ok().unwrap();
        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
    }
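
A standalone sketch of the block/bit decomposition that `bit_index` and `size_from_bit_index` perform, using plain `u64` byte indexes instead of `Size` (the names here are illustrative):

```rust
// Each byte of the allocation is one bit in the mask; 64 bits per block.
const BLOCK_SIZE: u64 = 64;

// Split a byte index into (block index, bit index within the block).
fn bit_index(byte: u64) -> (usize, usize) {
    ((byte / BLOCK_SIZE) as usize, (byte % BLOCK_SIZE) as usize)
}

// The inverse: reassemble the byte index from (block, bit).
fn byte_from_bit_index(block: usize, bit: usize) -> u64 {
    block as u64 * BLOCK_SIZE + bit as u64
}

fn main() {
    // Byte 200 lives in block 3 (bits 192..256), at bit 8 within that block.
    assert_eq!(bit_index(200), (3, 8));
    // The two functions are inverses of each other.
    assert_eq!(byte_from_bit_index(3, 8), 200);
}
```
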
    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = Self::bit_index(start);
        let (blockb, bitb) = Self::bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // The range crosses block boundaries.
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }
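
The single-block case above builds its mask from two shifts. A standalone sketch of that computation, assuming the same 64-bit blocks:

```rust
// Build the mask selecting bits `bita..bitb` (end-exclusive) within one block.
fn range_mask(bita: u32, bitb: u32) -> u64 {
    if bitb == 0 {
        // `end` falls on a block boundary, so every bit from `bita` up is included.
        u64::MAX << bita
    } else {
        // Keep bits >= bita, then drop bits >= bitb.
        (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
    }
}

fn main() {
    // Bits 3..7 of a block: 0b0111_1000.
    assert_eq!(range_mask(3, 7), 0b0111_1000);
    // Setting the range is a single OR; clearing it is a single AND with the complement.
    let block: u64 = 0;
    assert_eq!(block | range_mask(3, 7), 0b0111_1000);
}
```
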
    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = Self::bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = Self::bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    #[inline]
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
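
A standalone sketch of the capacity arithmetic in `grow`, with plain `u64` lengths in place of `Size`. Note that `amount / BLOCK_SIZE + 1` may allocate one block more than strictly necessary; the sketch keeps that behavior:

```rust
const BLOCK_SIZE: u64 = 64;

// How many blocks must be appended so that `amount` more bytes fit?
fn blocks_to_add(num_blocks: u64, len: u64, amount: u64) -> u64 {
    // Spare bits at the end of the last allocated block.
    let unused_trailing_bits = num_blocks * BLOCK_SIZE - len;
    if amount > unused_trailing_bits {
        // Rounds up generously: can over-allocate by one block, which is harmless.
        amount / BLOCK_SIZE + 1
    } else {
        0
    }
}

fn main() {
    // 1 block, 60 bytes tracked, growing by 10: only 4 spare bits, so extend.
    assert_eq!(blocks_to_add(1, 60, 10), 1);
    // Growing by 4 fits exactly in the spare bits: nothing to allocate.
    assert_eq!(blocks_to_add(1, 60, 4), 0);
}
```
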
    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
        /// A fast implementation of `find_bit`,
        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
        ///
        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
        /// and with the least significant bit (and lowest block) first:
        ///
        ///        00000000|00000000
        ///        ^      ^ ^      ^
        /// index: 0      7 8      15
        ///
        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
        fn find_bit_fast(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            /// Search one block, returning the index of the first bit equal to `is_init`.
            fn search_block(
                bits: Block,
                block: usize,
                start_bit: usize,
                is_init: bool,
            ) -> Option<Size> {
                // For the following examples, assume this function was called with:
                //   bits = 11011100
                //   start_bit = 3
                //   is_init = false
                // Note again that the least significant bit is written first,
                // which is backwards compared to how we normally write numbers.

                // Invert bits so we're always looking for the first set bit.
                //        ! 11011100
                //   bits = 00100011
                let bits = if is_init { bits } else { !bits };
                // Mask off unused start bits.
                //          00100011
                //        & 00011111
                //   bits = 00000011
                let bits = bits & (!0 << start_bit);
                // Find set bit, if any.
                //   bit = trailing_zeros(00000011)
                //   bit = 6
                if bits == 0 {
                    None
                } else {
                    let bit = bits.trailing_zeros();
                    Some(InitMask::size_from_bit_index(block, bit))
                }
            }

            if start >= end {
                return None;
            }

            // Convert `start` and `end` to block indexes and bit indexes within each block.
            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
            //
            // For example:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~~^             ^~~~~~~~~^
            //       start       end          start    end
            //
            // In both cases, the block index of `end` is 1.
            // But we do want to search block 1 in (a), and we don't in (b).
            //
            // If we subtract 1 from both end positions to make them inclusive:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~^              ^~~~~~~^
            //       start      end_inclusive start   end_inclusive
            //
            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
            // This provides the desired behavior of searching blocks 0 and 1 for (a),
            // and searching only block 0 for (b).
            let (start_block, start_bit) = InitMask::bit_index(start);
            let end_inclusive = Size::from_bytes(end.bytes() - 1);
            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);

            // Handle first block: need to skip `start_bit` bits.
            //
            // We need to handle the first block separately,
            // because there may be bits earlier in the block that should be ignored,
            // such as the bit marked (1) in this example:
            //
            //       (1)
            //        -|------
            //   (c) 01000000|00000000|00000001
            //       ^~~~~~~~~~~~~~~~~~^
            //       start              end
            if let Some(i) =
                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
            {
                // If the range is less than a block, we may find a matching bit after `end`.
                //
                // For example, we shouldn't successfully find bit (2), because it's after `end`:
                //
                //                  (2)
                //          -------|
                //   (d) 00000001|00000000|00000001
                //       ^~~~~^
                //       start end
                //
                // An alternative would be to mask off end bits in the same way as we do for start bits,
                // but performing this check afterwards is faster and simpler to implement.
                if i < end {
                    return Some(i);
                } else {
                    return None;
                }
            }

            // Handle remaining blocks.
            //
            // We can skip over an entire block at once if it's all 0s (resp. 1s).
            // The block marked (3) in this example is the first block that will be handled by this loop,
            // and it will be skipped for that reason:
            //
            //                (3)
            //                --------
            //   (e) 01000000|00000000|00000001
            //       ^~~~~~~~~~~~~~~~~~^
            //       start              end
            if start_block < end_block_inclusive {
                // This loop is written in a specific way for performance.
                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
                // because both alternatives result in significantly worse codegen.
                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
                    .iter()
                    .zip(start_block + 1..)
                {
                    if let Some(i) = search_block(bits, block, 0, is_init) {
                        // If this is the last block, we may find a matching bit after `end`.
                        //
                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
                        //
                        //                           (4)
                        //                   -------|
                        //   (f) 00000001|00000000|00000001
                        //       ^~~~~~~~~~~~~~~~~~^
                        //       start              end
                        //
                        // As above with example (d), we could handle the end block separately and mask off end bits,
                        // but unconditionally searching an entire block at once and performing this check afterwards
                        // is faster and much simpler to implement.
                        if i < end {
                            return Some(i);
                        } else {
                            return None;
                        }
                    }
                }
            }

            None
        }

        #[cfg_attr(not(debug_assertions), allow(dead_code))]
        fn find_bit_slow(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            (start..end).find(|&i| init_mask.get(i) == is_init)
        }

        let result = find_bit_fast(self, start, end, is_init);

        debug_assert_eq!(
            result,
            find_bit_slow(self, start, end, is_init),
            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
            start,
            end,
            is_init,
            self
        );

        result
    }
}
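
A standalone sketch of the search strategy used by `find_bit_fast`, over a plain `&[u64]`: whole blocks with no matching bit are skipped in one comparison, and `trailing_zeros` locates the first match inside a block. This sketch is not performance-tuned (it uses `..=`, which the comment above notes produces worse codegen in the real implementation):

```rust
const BLOCK_SIZE: u64 = 64;

// Find the first set bit in `start..end` (end-exclusive), or None.
fn find_first_set(blocks: &[u64], start: u64, end: u64) -> Option<u64> {
    if start >= end {
        return None;
    }
    let (start_block, start_bit) = ((start / BLOCK_SIZE) as usize, start % BLOCK_SIZE);
    // Inclusive end block, mirroring the `end_inclusive` trick above.
    let end_block_inclusive = ((end - 1) / BLOCK_SIZE) as usize;
    for block in start_block..=end_block_inclusive {
        // Mask off bits before `start` in the first block only.
        let mask = if block == start_block { !0u64 << start_bit } else { !0 };
        let bits = blocks[block] & mask;
        if bits != 0 {
            // `trailing_zeros` finds the lowest set bit without a per-bit loop.
            let i = block as u64 * BLOCK_SIZE + bits.trailing_zeros() as u64;
            // A match in the last block may still lie past `end`.
            return if i < end { Some(i) } else { None };
        }
        // Blocks that are all zero (after masking) are skipped in one comparison.
    }
    None
}

fn main() {
    // Bit 70 is the only set bit: block 0 is skipped whole, block 1 is searched.
    let blocks = [0u64, 1 << 6];
    assert_eq!(find_first_set(&blocks, 0, 128), Some(70));
    assert_eq!(find_first_set(&blocks, 0, 70), None);
}
```
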
/// A contiguous chunk of initialized or uninitialized memory.
pub enum InitChunk {
    Init(Range<Size>),
    Uninit(Range<Size>),
}

impl InitChunk {
    #[inline]
    pub fn range(&self) -> Range<Size> {
        match self {
            Self::Init(r) => r.clone(),
            Self::Uninit(r) => r.clone(),
        }
    }
}
impl InitMask {
    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    #[inline]
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        let uninit_start = self.find_bit(start, end, false);

        match uninit_start {
            Some(uninit_start) => {
                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
                Err(uninit_start..uninit_end)
            }
            None => Ok(()),
        }
    }

    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
    ///
    /// The iterator guarantees the following:
    /// - Chunks are nonempty.
    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
    #[inline]
    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
        assert!(end <= self.len);

        let is_init = if start < end { self.get(start) } else { false };

        InitChunkIter { init_mask: self, is_init, start, end }
    }
}
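
A standalone analogue of `is_range_initialized` over a `&[bool]` mask, showing how the first uninitialized run is located with two forward searches (the helper name is hypothetical):

```rust
use std::ops::Range;

// Ok(()) if `start..end` is fully initialized, else the first uninitialized run.
fn is_range_initialized(mask: &[bool], start: usize, end: usize) -> Result<(), Range<usize>> {
    if end > mask.len() {
        return Err(mask.len()..end);
    }
    // First uninitialized byte, if any...
    match (start..end).find(|&i| !mask[i]) {
        Some(uninit_start) => {
            // ...then extend to the end of that uninitialized run.
            let uninit_end = (uninit_start..end).find(|&i| mask[i]).unwrap_or(end);
            Err(uninit_start..uninit_end)
        }
        None => Ok(()),
    }
}

fn main() {
    let mask = [true, true, false, false, true];
    assert_eq!(is_range_initialized(&mask, 0, 5), Err(2..4));
    assert_eq!(is_range_initialized(&mask, 0, 2), Ok(()));
}
```
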
/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
pub struct InitChunkIter<'a> {
    init_mask: &'a InitMask,
    /// Whether the next chunk we will return is initialized.
    /// If there are no more chunks, contains some arbitrary value.
    is_init: bool,
    /// The current byte index into `init_mask`.
    start: Size,
    /// The end byte index into `init_mask`.
    end: Size,
}

impl<'a> Iterator for InitChunkIter<'a> {
    type Item = InitChunk;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.start >= self.end {
            return None;
        }

        let end_of_chunk =
            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
        let range = self.start..end_of_chunk;

        let ret =
            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });

        self.is_init = !self.is_init;
        self.start = end_of_chunk;

        ret
    }
}
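
A standalone sketch of the alternating-chunk strategy behind `InitChunkIter`: each step searches for the first bit whose state differs from the current run, which bounds the chunk. Over a `&[bool]` mask for simplicity:

```rust
use std::ops::Range;

// Collect (is_init, range) chunks: nonempty, adjacent, alternating, spanning 0..len.
fn chunks(mask: &[bool]) -> Vec<(bool, Range<usize>)> {
    let mut out = Vec::new();
    let (mut start, end) = (0, mask.len());
    let mut is_init = if start < end { mask[start] } else { false };
    while start < end {
        // The chunk ends at the first bit with the opposite state (or at `end`).
        let end_of_chunk = (start..end).find(|&i| mask[i] != is_init).unwrap_or(end);
        out.push((is_init, start..end_of_chunk));
        is_init = !is_init;
        start = end_of_chunk;
    }
    out
}

fn main() {
    let mask = [true, true, false, true];
    // Nonempty, adjacent, alternating chunks spanning the whole range.
    assert_eq!(chunks(&mask), vec![(true, 0..2), (false, 2..3), (true, 3..4)]);
}
```
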
/// Uninitialized bytes.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely initialized.
@@ -610,477 +1089,3 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
        }
    }
}

/// "Relocations" stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);

impl<Tag> Relocations<Tag> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, Tag>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, Tag)>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: AllocRange,
        dest: Size,
        count: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let size = src.size;
        let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

        for i in 0..count {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // Compute the offset for the current repetition.
                let dest_offset = dest + size * i; // `Size` operations
                (
                    // Shift offsets from the source allocation to the destination allocation.
                    (offset + dest_offset) - src.start, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.0.insert_presorted(relocations.relative_relocations);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false`, the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }

    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    #[inline]
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        let uninit_start = find_bit(self, start, end, false);

        match uninit_start {
            Some(uninit_start) => {
                let uninit_end = find_bit(self, uninit_start, end, true).unwrap_or(end);
                Err(uninit_start..uninit_end)
            }
            None => Ok(()),
        }
    }

    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
    ///
    /// The iterator guarantees the following:
    /// - Chunks are nonempty.
    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
    #[inline]
    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
        InitChunkIter::new(self, start, end)
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // The range crosses block boundaries.
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }

    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    #[inline]
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
}

/// A contiguous chunk of initialized or uninitialized memory.
pub enum InitChunk {
    Init(Range<Size>),
    Uninit(Range<Size>),
}

impl InitChunk {
    #[inline]
    pub fn range(&self) -> Range<Size> {
        match self {
            Self::Init(r) => r.clone(),
            Self::Uninit(r) => r.clone(),
        }
    }
}

/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
pub struct InitChunkIter<'a> {
    init_mask: &'a InitMask,
    /// Whether the next chunk we will return is initialized.
    is_init: bool,
    /// The current byte index into `init_mask`.
    start: Size,
    /// The end byte index into `init_mask`.
    end: Size,
}

impl<'a> InitChunkIter<'a> {
    #[inline]
    fn new(init_mask: &'a InitMask, start: Size, end: Size) -> Self {
        assert!(start <= end);
        assert!(end <= init_mask.len);

        let is_init = if start < end { init_mask.get(start) } else { false };

        Self { init_mask, is_init, start, end }
    }
}

impl<'a> Iterator for InitChunkIter<'a> {
    type Item = InitChunk;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.start >= self.end {
            return None;
        }

        let end_of_chunk =
            find_bit(&self.init_mask, self.start, self.end, !self.is_init).unwrap_or(self.end);
        let range = self.start..end_of_chunk;

        let ret =
            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });

        self.is_init = !self.is_init;
        self.start = end_of_chunk;

        ret
    }
}

/// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
fn find_bit(init_mask: &InitMask, start: Size, end: Size, is_init: bool) -> Option<Size> {
    /// A fast implementation of `find_bit`,
    /// which skips over an entire block at a time if it's all 0s (resp. 1s),
    /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
    ///
    /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
    /// and with the least significant bit (and lowest block) first:
    ///
    ///        00000000|00000000
    ///        ^      ^ ^      ^
    /// index: 0      7 8      15
    ///
    /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
    fn find_bit_fast(init_mask: &InitMask, start: Size, end: Size, is_init: bool) -> Option<Size> {
        /// Search one block, returning the index of the first bit equal to `is_init`.
        fn search_block(
            bits: Block,
            block: usize,
            start_bit: usize,
            is_init: bool,
        ) -> Option<Size> {
            // For the following examples, assume this function was called with:
            //   bits = 11011100
            //   start_bit = 3
            //   is_init = false
            // Note again that the least significant bit is written first,
            // which is backwards compared to how we normally write numbers.

            // Invert bits so we're always looking for the first set bit.
            //        ! 11011100
            //   bits = 00100011
            let bits = if is_init { bits } else { !bits };
            // Mask off unused start bits.
            //          00100011
            //        & 00011111
            //   bits = 00000011
            let bits = bits & (!0 << start_bit);
            // Find set bit, if any.
            //   bit = trailing_zeros(00000011)
            //   bit = 6
            if bits == 0 {
                None
            } else {
                let bit = bits.trailing_zeros();
                Some(size_from_bit_index(block, bit))
            }
        }

        if start >= end {
            return None;
        }

        // Convert `start` and `end` to block indexes and bit indexes within each block.
        // We must convert `end` to an inclusive bound to handle block boundaries correctly.
        //
        // For example:
        //
        //   (a) 00000000|00000000    (b) 00000000|
        //       ^~~~~~~~~~~^             ^~~~~~~~~^
        //       start       end          start    end
        //
        // In both cases, the block index of `end` is 1.
        // But we do want to search block 1 in (a), and we don't in (b).
        //
        // If we subtract 1 from both end positions to make them inclusive:
        //
        //   (a) 00000000|00000000    (b) 00000000|
        //       ^~~~~~~~~~^              ^~~~~~~^
        //       start      end_inclusive start   end_inclusive
        //
        // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
        // This provides the desired behavior of searching blocks 0 and 1 for (a),
        // and searching only block 0 for (b).
        let (start_block, start_bit) = bit_index(start);
        let end_inclusive = Size::from_bytes(end.bytes() - 1);
        let (end_block_inclusive, _) = bit_index(end_inclusive);

        // Handle first block: need to skip `start_bit` bits.
        //
        // We need to handle the first block separately,
        // because there may be bits earlier in the block that should be ignored,
        // such as the bit marked (1) in this example:
        //
        //       (1)
        //        -|------
        //   (c) 01000000|00000000|00000001
        //       ^~~~~~~~~~~~~~~~~~^
        //       start              end
        if let Some(i) =
            search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
        {
            if i < end {
                return Some(i);
            } else {
                // If the range is less than a block, we may find a matching bit after `end`.
                //
                // For example, we shouldn't successfully find bit (2), because it's after `end`:
                //
                //                  (2)
                //          -------|
                //   (d) 00000001|00000000|00000001
                //       ^~~~~^
                //       start end
                //
                // An alternative would be to mask off end bits in the same way as we do for start bits,
                // but performing this check afterwards is faster and simpler to implement.
                return None;
            }
        }

        // Handle remaining blocks.
        //
        // We can skip over an entire block at once if it's all 0s (resp. 1s).
        // The block marked (3) in this example is the first block that will be handled by this loop,
        // and it will be skipped for that reason:
        //
        //                (3)
        //                --------
        //   (e) 01000000|00000000|00000001
        //       ^~~~~~~~~~~~~~~~~~^
        //       start              end
        if start_block < end_block_inclusive {
            // This loop is written in a specific way for performance.
            // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
            // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
            // because both alternatives result in significantly worse codegen.
            // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
            // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
            for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
                .iter()
                .zip(start_block + 1..)
            {
                if let Some(i) = search_block(bits, block, 0, is_init) {
                    if i < end {
                        return Some(i);
                    } else {
                        // If this is the last block, we may find a matching bit after `end`.
                        //
                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
                        //
                        //                           (4)
                        //                   -------|
                        //   (f) 00000001|00000000|00000001
                        //       ^~~~~~~~~~~~~~~~~~^
                        //       start              end
                        //
                        // As above with example (d), we could handle the end block separately and mask off end bits,
                        // but unconditionally searching an entire block at once and performing this check afterwards
                        // is faster and much simpler to implement.
                        return None;
                    }
                }
            }
        }

        None
    }

    #[cfg_attr(not(debug_assertions), allow(dead_code))]
    fn find_bit_slow(init_mask: &InitMask, start: Size, end: Size, is_init: bool) -> Option<Size> {
        (start..end).find(|&i| init_mask.get(i) == is_init)
    }

    let result = find_bit_fast(init_mask, start, end, is_init);

    debug_assert_eq!(
        result,
        find_bit_slow(init_mask, start, end, is_init),
        "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
        start,
        end,
        is_init,
        init_mask
    );

    result
}

#[inline]
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / InitMask::BLOCK_SIZE;
    let b = bits % InitMask::BLOCK_SIZE;
    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}

#[inline]
fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
    let block = block.try_into().ok().unwrap();
    let bit = bit.try_into().ok().unwrap();
    Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
}