
Auto merge of #65826 - JohnTitor:rollup-mr6crka, r=JohnTitor

Rollup of 6 pull requests

Successful merges:

 - #65705 (Add {String,Vec}::into_raw_parts)
 - #65749 (Insurance policy in case `iter.size_hint()` lies.)
 - #65799 (Fill tracking issue number for `array_value_iter`)
 - #65800 (self-profiling: Update measureme to 0.4.0 and remove non-RAII methods from profiler.)
 - #65806 (Add [T]::as_ptr_range() and [T]::as_mut_ptr_range().)
 - #65810 (SGX: Clear additional flag on enclave entry)

Failed merges:

r? @ghost
bors 2019-10-25 20:41:28 +00:00
commit 246be7e1a5
12 changed files with 225 additions and 130 deletions

View file

@@ -1966,9 +1966,9 @@ dependencies = [
[[package]]
name = "measureme"
version = "0.3.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d09de7dafa3aa334bc806447c7e4de69419723312f4b88b80b561dea66601ce8"
checksum = "cd21b0e6e1af976b269ce062038fe5e1b9ca2f817ab7a3af09ec4210aebf0d30"
dependencies = [
"byteorder",
"memmap",

View file

@@ -196,20 +196,21 @@ use crate::vec::Vec;
///
/// let story = String::from("Once upon a time...");
///
/// let ptr = story.as_ptr();
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Prevent automatically dropping the String's data
/// let mut story = mem::ManuallyDrop::new(story);
///
/// let ptr = story.as_mut_ptr();
/// let len = story.len();
/// let capacity = story.capacity();
///
/// // story has nineteen bytes
/// assert_eq!(19, len);
///
/// // Now that we have our parts, we throw the story away.
/// mem::forget(story);
///
/// // We can re-build a String out of ptr, len, and capacity. This is all
/// // unsafe because we are responsible for making sure the components are
/// // valid:
/// let s = unsafe { String::from_raw_parts(ptr as *mut _, len, capacity) } ;
/// let s = unsafe { String::from_raw_parts(ptr, len, capacity) } ;
///
/// assert_eq!(String::from("Once upon a time..."), s);
/// ```
@@ -647,6 +648,37 @@ impl String {
decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect()
}
/// Decomposes a `String` into its raw components.
///
/// Returns the raw pointer to the underlying data, the length of
/// the string (in bytes), and the allocated capacity of the data
/// (in bytes). These are the same arguments in the same order as
/// the arguments to [`from_raw_parts`].
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `String`. The only way to do
/// this is to convert the raw pointer, length, and capacity back
/// into a `String` with the [`from_raw_parts`] function, allowing
/// the destructor to perform the cleanup.
///
/// [`from_raw_parts`]: #method.from_raw_parts
///
/// # Examples
///
/// ```
/// #![feature(vec_into_raw_parts)]
/// let s = String::from("hello");
///
/// let (ptr, len, cap) = s.into_raw_parts();
///
/// let rebuilt = unsafe { String::from_raw_parts(ptr, len, cap) };
/// assert_eq!(rebuilt, "hello");
/// ```
#[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
pub fn into_raw_parts(self) -> (*mut u8, usize, usize) {
self.vec.into_raw_parts()
}
/// Creates a new `String` from a length, capacity, and pointer.
///
/// # Safety
@@ -677,13 +709,16 @@ impl String {
///
/// unsafe {
/// let s = String::from("hello");
/// let ptr = s.as_ptr();
///
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Prevent automatically dropping the String's data
/// let mut s = mem::ManuallyDrop::new(s);
///
/// let ptr = s.as_mut_ptr();
/// let len = s.len();
/// let capacity = s.capacity();
///
/// mem::forget(s);
///
/// let s = String::from_raw_parts(ptr as *mut _, len, capacity);
/// let s = String::from_raw_parts(ptr, len, capacity);
///
/// assert_eq!(String::from("hello"), s);
/// }
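The updated examples work because `ManuallyDrop` inhibits the destructor without moving the `String`, so the pointer taken afterwards stays valid; the old code took the pointer first and then moved the value into `mem::forget`. A minimal standalone sketch of the new round trip, which compiles on stable:

    use std::mem;

    fn main() {
        let story = String::from("Once upon a time...");

        // Inhibit the destructor without moving the value, so the
        // pointer taken below stays tied to a live allocation.
        let mut story = mem::ManuallyDrop::new(story);

        let ptr = story.as_mut_ptr();
        let len = story.len();
        let capacity = story.capacity();

        // SAFETY: the parts come from a String that will never be
        // dropped, so the new String takes over the allocation cleanly.
        let s = unsafe { String::from_raw_parts(ptr, len, capacity) };
        assert_eq!(s, "Once upon a time...");
    }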

View file

@@ -358,6 +358,44 @@ impl<T> Vec<T> {
}
}
/// Decomposes a `Vec<T>` into its raw components.
///
/// Returns the raw pointer to the underlying data, the length of
/// the vector (in elements), and the allocated capacity of the
/// data (in elements). These are the same arguments in the same
/// order as the arguments to [`from_raw_parts`].
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `Vec`. The only way to do
/// this is to convert the raw pointer, length, and capacity back
/// into a `Vec` with the [`from_raw_parts`] function, allowing
/// the destructor to perform the cleanup.
///
/// [`from_raw_parts`]: #method.from_raw_parts
///
/// # Examples
///
/// ```
/// #![feature(vec_into_raw_parts)]
/// let v: Vec<i32> = vec![-1, 0, 1];
///
/// let (ptr, len, cap) = v.into_raw_parts();
///
/// let rebuilt = unsafe {
/// // We can now make changes to the components, such as
/// // transmuting the raw pointer to a compatible type.
/// let ptr = ptr as *mut u32;
///
/// Vec::from_raw_parts(ptr, len, cap)
/// };
/// assert_eq!(rebuilt, [4294967295, 0, 1]);
/// ```
#[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
let mut me = mem::ManuallyDrop::new(self);
(me.as_mut_ptr(), me.len(), me.capacity())
}
/// Creates a `Vec<T>` directly from the raw components of another vector.
///
/// # Safety
@@ -389,7 +427,12 @@ impl<T> Vec<T> {
/// use std::ptr;
/// use std::mem;
///
/// let mut v = vec![1, 2, 3];
/// let v = vec![1, 2, 3];
///
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Prevent running `v`'s destructor so we are in complete control
/// // of the allocation.
/// let mut v = mem::ManuallyDrop::new(v);
///
/// // Pull out the various important pieces of information about `v`
/// let p = v.as_mut_ptr();
@@ -397,10 +440,6 @@ impl<T> Vec<T> {
/// let cap = v.capacity();
///
/// unsafe {
/// // Cast `v` into the void: no destructor run, so we are in
/// // complete control of the allocation to which `p` points.
/// mem::forget(v);
///
/// // Overwrite memory with 4, 5, 6
/// for i in 0..len as isize {
/// ptr::write(p.offset(i), 4 + i);
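With the unstable method introduced above, the whole `ManuallyDrop` dance collapses into one call; a minimal sketch, requiring a nightly toolchain for the `vec_into_raw_parts` gate:

    #![feature(vec_into_raw_parts)]

    fn main() {
        let v = vec![1, 2, 3];
        let (ptr, len, cap) = v.into_raw_parts();

        // The raw parts could now cross an FFI boundary; rebuilding
        // hands the allocation back to a destructor-running Vec.
        let rebuilt = unsafe { Vec::from_raw_parts(ptr, len, cap) };
        assert_eq!(rebuilt, [1, 2, 3]);
    }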

View file

@@ -13,7 +13,7 @@ use super::LengthAtMost32;
/// A by-value [array] iterator.
///
/// [array]: ../../std/primitive.array.html
#[unstable(feature = "array_value_iter", issue = "0")]
#[unstable(feature = "array_value_iter", issue = "65798")]
pub struct IntoIter<T, const N: usize>
where
[T; N]: LengthAtMost32,
@@ -49,7 +49,7 @@ where
/// *Note*: this method might never get stabilized, and/or it might be removed
/// in the future, as there will likely be another, preferred way of obtaining
/// this iterator (either via `IntoIterator` for arrays or via another way).
#[unstable(feature = "array_value_iter", issue = "0")]
#[unstable(feature = "array_value_iter", issue = "65798")]
pub fn new(array: [T; N]) -> Self {
// The transmute here is actually safe. The docs of `MaybeUninit`
// promise:
@@ -95,7 +95,7 @@ where
}
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> Iterator for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
@@ -141,7 +141,7 @@ where
}
}
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
@@ -176,7 +176,7 @@ where
}
}
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> Drop for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
@@ -189,7 +189,7 @@ where
}
}
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> ExactSizeIterator for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
@@ -204,7 +204,7 @@ where
}
}
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> FusedIterator for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
@@ -214,13 +214,13 @@ where
// elements (that will still be yielded) is the length of the range `alive`.
// This range is decremented in length in either `next` or `next_back`. It is
// always decremented by 1 in those methods, but only if `Some(_)` is returned.
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
unsafe impl<T, const N: usize> TrustedLen for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
{}
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T: Clone, const N: usize> Clone for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
@@ -251,7 +251,7 @@ where
}
}
#[stable(feature = "array_value_iter_impls", since = "1.38.0")]
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T: fmt::Debug, const N: usize> fmt::Debug for IntoIter<T, {N}>
where
[T; N]: LengthAtMost32,
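Taken together, the impls above make the iterator usable like any other; a sketch of by-value iteration on a nightly of this vintage, with the `array_value_iter` gate enabled:

    #![feature(array_value_iter)]

    use std::array::IntoIter;

    fn main() {
        // By-value iteration moves the elements out of the array, so
        // no Copy/Clone bound is needed even for arrays of String.
        let strings = [String::from("a"), String::from("b")];
        for s in IntoIter::new(strings) {
            println!("{}", s);
        }
    }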

View file

@@ -18,7 +18,7 @@ use crate::slice::{Iter, IterMut};
mod iter;
#[cfg(not(bootstrap))]
#[unstable(feature = "array_value_iter", issue = "0")]
#[unstable(feature = "array_value_iter", issue = "65798")]
pub use iter::IntoIter;
/// Utility trait implemented only on arrays of fixed size

View file

@@ -874,6 +874,7 @@ extern "rust-intrinsic" {
/// // the original inner type (`&i32`) to the converted inner type
/// // (`Option<&i32>`), so read the nomicon pages linked above.
/// let v_from_raw = unsafe {
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Ensure the original vector is not dropped.
/// let mut v_clone = std::mem::ManuallyDrop::new(v_clone);
/// Vec::from_raw_parts(v_clone.as_mut_ptr() as *mut Option<&i32>,
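A standalone sketch of the container-conversion pattern this example describes; it leans on the guarantee that `Option<&i32>` has the same layout as `&i32` (the null-pointer optimization):

    use std::mem::ManuallyDrop;

    fn main() {
        let v: Vec<&i32> = vec![&1, &2, &3];

        // Ensure the original vector is not dropped.
        let mut v = ManuallyDrop::new(v);
        let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());

        // SAFETY: same size, alignment, and allocation; only the
        // interpretation of the element type changes.
        let v: Vec<Option<&i32>> =
            unsafe { Vec::from_raw_parts(ptr as *mut Option<&i32>, len, cap) };
        assert_eq!(v, [Some(&1), Some(&2), Some(&3)]);
    }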

View file

@@ -28,7 +28,7 @@ use crate::fmt;
use crate::intrinsics::{assume, exact_div, unchecked_sub, is_aligned_and_not_null};
use crate::isize;
use crate::iter::*;
use crate::ops::{FnMut, self};
use crate::ops::{FnMut, Range, self};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::result::Result;
@@ -407,6 +407,86 @@ impl<T> [T] {
self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// #![feature(slice_ptr_range)]
///
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// [`as_ptr`]: #method.as_ptr
#[unstable(feature = "slice_ptr_range", issue = "65807")]
#[inline]
pub fn as_ptr_range(&self) -> Range<*const T> {
// The `add` here is safe, because:
//
// - Both pointers are part of the same object, as pointing directly
// past the object also counts.
//
// - The size of the slice is never larger than isize::MAX bytes, as
// noted here:
// - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
// - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
// - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
// (This doesn't seem normative yet, but the very same assumption is
// made in many places, including the Index implementation of slices.)
//
// - There is no wrapping around involved, as slices do not wrap past
// the end of the address space.
//
// See the documentation of pointer::add.
let start = self.as_ptr();
let end = unsafe { start.add(self.len()) };
start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[unstable(feature = "slice_ptr_range", issue = "65807")]
#[inline]
pub fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
// See as_ptr_range() above for why `add` here is safe.
let start = self.as_mut_ptr();
let end = unsafe { start.add(self.len()) };
start..end
}
/// Swaps two elements in the slice.
///
/// # Arguments
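A sketch of the C++-style two-pointer loop mentioned above, on a nightly with the `slice_ptr_range` gate; the `Range` ends are ordinary raw pointers, so `start == end` is exactly the empty-slice case:

    #![feature(slice_ptr_range)]

    fn main() {
        let mut v = [1u32, 2, 3];
        let range = v.as_mut_ptr_range();
        let mut p = range.start;

        // Walk [start, end) the way a C++ iterator pair would.
        while p != range.end {
            unsafe {
                *p *= 10;
                p = p.add(1);
            }
        }
        assert_eq!(v, [10, 20, 30]);
    }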

View file

@@ -37,4 +37,4 @@ byteorder = { version = "1.3" }
chalk-engine = { version = "0.9.0", default-features=false }
rustc_fs_util = { path = "../librustc_fs_util" }
smallvec = { version = "0.6.8", features = ["union", "may_dangle"] }
measureme = "0.3"
measureme = "0.4"

View file

@@ -2930,14 +2930,18 @@ impl<T, R, E> InternIteratorElement<T, R> for Result<T, E> {
// lower bounds from `size_hint` agree they are correct.
Ok(match iter.size_hint() {
(1, Some(1)) => {
f(&[iter.next().unwrap()?])
let t0 = iter.next().unwrap()?;
assert!(iter.next().is_none());
f(&[t0])
}
(2, Some(2)) => {
let t0 = iter.next().unwrap()?;
let t1 = iter.next().unwrap()?;
assert!(iter.next().is_none());
f(&[t0, t1])
}
(0, Some(0)) => {
assert!(iter.next().is_none());
f(&[])
}
_ => {
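The new asserts are needed because `size_hint` is a hint coming from arbitrary safe code, not a contract; a contrived sketch (the `Liar` type is illustrative) of an iterator whose hint lies, which these checks would catch:

    struct Liar(u32);

    impl Iterator for Liar {
        type Item = u32;
        fn next(&mut self) -> Option<u32> {
            self.0 += 1;
            Some(self.0) // never terminates...
        }
        fn size_hint(&self) -> (usize, Option<usize>) {
            (1, Some(1)) // ...yet claims exactly one element
        }
    }

    fn main() {
        let mut it = Liar(0);
        assert_eq!(it.size_hint(), (1, Some(1)));
        let _first = it.next().unwrap();
        // A truthful iterator would be exhausted here; this one is not,
        // which is precisely what `assert!(iter.next().is_none())` traps.
        assert!(it.next().is_some());
    }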

View file

@@ -90,6 +90,10 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
}
return TryGetJob::JobCompleted(result);
}
#[cfg(parallel_compiler)]
let query_blocked_prof_timer;
let job = match lock.active.entry((*key).clone()) {
Entry::Occupied(entry) => {
match *entry.get() {
@@ -98,7 +102,9 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
// in another thread has completed. Record how long we wait in the
// self-profiler.
#[cfg(parallel_compiler)]
tcx.prof.query_blocked_start(Q::NAME);
{
query_blocked_prof_timer = tcx.prof.query_blocked(Q::NAME);
}
job.clone()
},
@@ -140,7 +146,11 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
#[cfg(parallel_compiler)]
{
let result = job.r#await(tcx, span);
tcx.prof.query_blocked_end(Q::NAME);
// This `drop()` is not strictly necessary as the binding
// would go out of scope anyway. But it's good to have an
// explicit marker of how far the measurement goes.
drop(query_blocked_prof_timer);
if let Err(cycle) = result {
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
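The change swaps paired start/end calls for a guard value whose `Drop` impl ends the measurement; a generic sketch of the pattern (the names here are illustrative, not the real profiler API):

    use std::time::Instant;

    struct BlockedTimer {
        start: Instant,
    }

    impl Drop for BlockedTimer {
        fn drop(&mut self) {
            // Runs on every exit path — early return, `?`, or panic —
            // so the interval can never be left open.
            println!("blocked for {:?}", self.start.elapsed());
        }
    }

    fn main() {
        let timer = BlockedTimer { start: Instant::now() };
        // ... wait for the concurrent query to finish ...
        drop(timer); // optional explicit end, as in the code above
    }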

View file

@@ -14,9 +14,12 @@ use measureme::{StringId, TimestampKind};
/// MmapSerializationSink is faster on macOS and Linux,
/// but FileSerializationSink is faster on Windows.
#[cfg(not(windows))]
type Profiler = measureme::Profiler<measureme::MmapSerializationSink>;
type SerializationSink = measureme::MmapSerializationSink;
#[cfg(windows)]
type Profiler = measureme::Profiler<measureme::FileSerializationSink>;
type SerializationSink = measureme::FileSerializationSink;
type Profiler = measureme::Profiler<SerializationSink>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
pub enum ProfileCategory {
@@ -131,32 +134,6 @@ impl SelfProfilerRef {
})
}
/// Start profiling a generic activity. Profiling continues until
/// `generic_activity_end` is called. The RAII-based `generic_activity`
/// usually is the better alternative.
#[inline(always)]
pub fn generic_activity_start(&self, event_id: &str) {
self.non_guard_generic_event(
|profiler| profiler.generic_activity_event_kind,
|profiler| profiler.profiler.alloc_string(event_id),
EventFilter::GENERIC_ACTIVITIES,
TimestampKind::Start,
);
}
/// End profiling a generic activity that was started with
/// `generic_activity_start`. The RAII-based `generic_activity` usually is
/// the better alternative.
#[inline(always)]
pub fn generic_activity_end(&self, event_id: &str) {
self.non_guard_generic_event(
|profiler| profiler.generic_activity_event_kind,
|profiler| profiler.profiler.alloc_string(event_id),
EventFilter::GENERIC_ACTIVITIES,
TimestampKind::End,
);
}
/// Start profiling a query provider. Profiling continues until the
/// TimingGuard returned from this call is dropped.
#[inline(always)]
@@ -179,26 +156,14 @@ impl SelfProfilerRef {
}
/// Start profiling a query being blocked on a concurrent execution.
/// Profiling continues until `query_blocked_end` is called.
/// Profiling continues until the TimingGuard returned from this call is
/// dropped.
#[inline(always)]
pub fn query_blocked_start(&self, query_name: QueryName) {
self.non_guard_query_event(
|profiler| profiler.query_blocked_event_kind,
query_name,
EventFilter::QUERY_BLOCKED,
TimestampKind::Start,
);
}
/// End profiling a query being blocked on a concurrent execution.
#[inline(always)]
pub fn query_blocked_end(&self, query_name: QueryName) {
self.non_guard_query_event(
|profiler| profiler.query_blocked_event_kind,
query_name,
EventFilter::QUERY_BLOCKED,
TimestampKind::End,
);
pub fn query_blocked(&self, query_name: QueryName) -> TimingGuard<'_> {
self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
let event_id = SelfProfiler::get_query_name_string_id(query_name);
TimingGuard::start(profiler, profiler.query_blocked_event_kind, event_id)
})
}
/// Start profiling how long it takes to load a query result from the
@@ -238,28 +203,6 @@ impl SelfProfilerRef {
TimingGuard::none()
}));
}
#[inline(always)]
fn non_guard_generic_event<F: FnOnce(&SelfProfiler) -> StringId>(
&self,
event_kind: fn(&SelfProfiler) -> StringId,
event_id: F,
event_filter: EventFilter,
timestamp_kind: TimestampKind
) {
drop(self.exec(event_filter, |profiler| {
let thread_id = thread_id_to_u64(std::thread::current().id());
profiler.profiler.record_event(
event_kind(profiler),
event_id(profiler),
thread_id,
timestamp_kind,
);
TimingGuard::none()
}));
}
}
pub struct SelfProfiler {
@@ -346,14 +289,7 @@ impl SelfProfiler {
}
#[must_use]
pub struct TimingGuard<'a>(Option<TimingGuardInternal<'a>>);
struct TimingGuardInternal<'a> {
raw_profiler: &'a Profiler,
event_id: StringId,
event_kind: StringId,
thread_id: u64,
}
pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a, SerializationSink>>);
impl<'a> TimingGuard<'a> {
#[inline]
@@ -364,14 +300,10 @@ impl<'a> TimingGuard<'a> {
) -> TimingGuard<'a> {
let thread_id = thread_id_to_u64(std::thread::current().id());
let raw_profiler = &profiler.profiler;
raw_profiler.record_event(event_kind, event_id, thread_id, TimestampKind::Start);
TimingGuard(Some(TimingGuardInternal {
raw_profiler,
event_kind,
event_id,
thread_id,
}))
let timing_guard = raw_profiler.start_recording_interval_event(event_kind,
event_id,
thread_id);
TimingGuard(Some(timing_guard))
}
#[inline]
@@ -379,15 +311,3 @@ impl<'a> TimingGuard<'a> {
TimingGuard(None)
}
}
impl<'a> Drop for TimingGuardInternal<'a> {
#[inline]
fn drop(&mut self) {
self.raw_profiler.record_event(
self.event_kind,
self.event_id,
self.thread_id,
TimestampKind::End
);
}
}
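One subtlety of guard-based APIs like the new `TimingGuard`: the value must be bound to a named variable, or the interval closes immediately. `#[must_use]` catches the unbound call but not the `let _ =` form; a small sketch (the `Guard` type is illustrative):

    #[must_use]
    struct Guard;

    impl Drop for Guard {
        fn drop(&mut self) {
            println!("interval closed");
        }
    }

    fn start() -> Guard {
        println!("interval opened");
        Guard
    }

    fn main() {
        let _g = start(); // lives to end of scope: correct
        let _ = start();  // dropped immediately: interval is empty
        start();          // also immediate, but #[must_use] warns here
        println!("work happens while _g is still open");
    }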

View file

@@ -119,8 +119,14 @@ sgx_entry:
mov %rbx,%gs:tcsls_tcs_addr
stmxcsr %gs:tcsls_user_mxcsr
fnstcw %gs:tcsls_user_fcw
/* reset user state */
cld /* x86-64 ABI requires DF to be unset at function entry/exit */
/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */
/* - AC flag: AEX on misaligned memory accesses leaks side channel info */
pushfq
andq $~0x40400, (%rsp)
popfq
/* check for debug buffer pointer */
testb $0xff,DEBUG(%rip)
jz .Lskip_debug_init
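The mask in `andq $~0x40400, (%rsp)` covers exactly the two flags named in the comments: DF is bit 10 of RFLAGS (0x400) and AC is bit 18 (0x40000). A quick check of the arithmetic:

    fn main() {
        const DF: u64 = 1 << 10; // direction flag
        const AC: u64 = 1 << 18; // alignment-check flag
        assert_eq!(DF | AC, 0x40400);

        // The asm pushes RFLAGS, clears the two bits in memory, and
        // pops the result back; the same operation on an example value:
        let rflags: u64 = 0x0004_0602; // AC | DF | IF | reserved bit 1
        let cleared = rflags & !0x40400;
        assert_eq!(cleared, 0x0202);
    }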