
Auto merge of #100847 - matthiaskrgr:rollup-0ga531s, r=matthiaskrgr

Rollup of 11 pull requests

Successful merges:

 - #100556 (Clamp Function for f32 and f64)
 - #100663 (Make slice::reverse const)
 - #100697 (Minor syntax and formatting update to doc comment on `find_vtable_types_for_unsizing`)
 - #100760 (update test for LLVM change)
 - #100761 (some general mir typeck cleanup)
 - #100775 (rustdoc: Merge source code pages HTML elements together v2)
 - #100813 (Add `/build-rust-analyzer/` to .gitignore)
 - #100821 (Make some docs nicer wrt pointer offsets)
 - #100822 (Replace most uses of `pointer::offset` with `add` and `sub`)
 - #100839 (Make doc for stdin field of process consistent)
 - #100842 (Add diagnostics lints to `rustc_transmute` module (zero diags))

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2022-08-21 19:05:18 +00:00
commit c0941dfb5a
44 changed files with 351 additions and 301 deletions
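
Most of the churn below comes from #100822, which replaces `pointer::offset` with `add` and `sub`. As a reference point, a minimal sketch of the equivalence being relied on (buffer and names are illustrative, not from the diff):

```rust
fn main() {
    let mut buf = [10u8, 20, 30, 40];
    let p = buf.as_mut_ptr();
    let n = 2usize;
    unsafe {
        // `add(n)` is `offset(n as isize)` without the cast...
        assert_eq!(p.offset(n as isize), p.add(n));
        // ...and `sub(n)` replaces the negated-offset form.
        assert_eq!(p.add(n).offset(-(n as isize)), p.add(n).sub(n));
    }
}
```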

.gitignore vendored
View file

@ -42,6 +42,7 @@ no_llvm_build
/llvm/
/mingw-build/
/build/
/build-rust-analyzer/
/dist/
/unicode-downloads
/target

View file

@ -219,7 +219,7 @@ impl<T> TypedArena<T> {
} else {
let ptr = self.ptr.get();
// Advance the pointer.
self.ptr.set(self.ptr.get().offset(1));
self.ptr.set(self.ptr.get().add(1));
// Write into uninitialized memory.
ptr::write(ptr, object);
&mut *ptr

View file

@ -90,12 +90,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
locations: Locations,
category: ConstraintCategory<'tcx>,
) {
self.prove_predicates(
Some(ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate {
self.prove_predicate(
ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate {
trait_ref,
constness: ty::BoundConstness::NotConst,
polarity: ty::ImplPolarity::Positive,
}))),
}))
.to_predicate(self.tcx()),
locations,
category,
);

View file

@ -268,7 +268,7 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
// }
// impl Foo for () {
// type Bar = ();
// fn foo(&self) ->&() {}
// fn foo(&self) -> &() {}
// }
// ```
// Both &Self::Bar and &() are WF

View file

@ -178,97 +178,15 @@ pub(crate) fn type_check<'mir, 'tcx>(
upvars,
};
let opaque_type_values = type_check_internal(
infcx,
param_env,
body,
promoted,
&region_bound_pairs,
implicit_region_bound,
&mut borrowck_context,
|mut cx| {
debug!("inside extra closure of type_check_internal");
cx.equate_inputs_and_outputs(&body, universal_regions, &normalized_inputs_and_output);
liveness::generate(
&mut cx,
body,
elements,
flow_inits,
move_data,
location_table,
use_polonius,
);
translate_outlives_facts(&mut cx);
let opaque_type_values =
infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
opaque_type_values
.into_iter()
.map(|(opaque_type_key, decl)| {
cx.fully_perform_op(
Locations::All(body.span),
ConstraintCategory::OpaqueType,
CustomTypeOp::new(
|infcx| {
infcx.register_member_constraints(
param_env,
opaque_type_key,
decl.hidden_type.ty,
decl.hidden_type.span,
);
Ok(InferOk { value: (), obligations: vec![] })
},
|| "opaque_type_map".to_string(),
),
)
.unwrap();
let mut hidden_type = infcx.resolve_vars_if_possible(decl.hidden_type);
trace!(
"finalized opaque type {:?} to {:#?}",
opaque_type_key,
hidden_type.ty.kind()
);
if hidden_type.has_infer_types_or_consts() {
infcx.tcx.sess.delay_span_bug(
decl.hidden_type.span,
&format!("could not resolve {:#?}", hidden_type.ty.kind()),
);
hidden_type.ty = infcx.tcx.ty_error();
}
(opaque_type_key, (hidden_type, decl.origin))
})
.collect()
},
);
MirTypeckResults { constraints, universal_region_relations, opaque_type_values }
}
#[instrument(
skip(infcx, body, promoted, region_bound_pairs, borrowck_context, extra),
level = "debug"
)]
fn type_check_internal<'a, 'tcx, R>(
infcx: &'a InferCtxt<'a, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
body: &'a Body<'tcx>,
promoted: &'a IndexVec<Promoted, Body<'tcx>>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
implicit_region_bound: ty::Region<'tcx>,
borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
extra: impl FnOnce(TypeChecker<'a, 'tcx>) -> R,
) -> R {
debug!("body: {:#?}", body);
let mut checker = TypeChecker::new(
infcx,
body,
param_env,
region_bound_pairs,
&region_bound_pairs,
implicit_region_bound,
borrowck_context,
&mut borrowck_context,
);
let errors_reported = {
let mut verifier = TypeVerifier::new(&mut checker, promoted);
verifier.visit_body(&body);
@ -280,7 +198,56 @@ fn type_check_internal<'a, 'tcx, R>(
checker.typeck_mir(body);
}
extra(checker)
checker.equate_inputs_and_outputs(&body, universal_regions, &normalized_inputs_and_output);
liveness::generate(
&mut checker,
body,
elements,
flow_inits,
move_data,
location_table,
use_polonius,
);
translate_outlives_facts(&mut checker);
let opaque_type_values = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
let opaque_type_values = opaque_type_values
.into_iter()
.map(|(opaque_type_key, decl)| {
checker
.fully_perform_op(
Locations::All(body.span),
ConstraintCategory::OpaqueType,
CustomTypeOp::new(
|infcx| {
infcx.register_member_constraints(
param_env,
opaque_type_key,
decl.hidden_type.ty,
decl.hidden_type.span,
);
Ok(InferOk { value: (), obligations: vec![] })
},
|| "opaque_type_map".to_string(),
),
)
.unwrap();
let mut hidden_type = infcx.resolve_vars_if_possible(decl.hidden_type);
trace!("finalized opaque type {:?} to {:#?}", opaque_type_key, hidden_type.ty.kind());
if hidden_type.has_infer_types_or_consts() {
infcx.tcx.sess.delay_span_bug(
decl.hidden_type.span,
&format!("could not resolve {:#?}", hidden_type.ty.kind()),
);
hidden_type.ty = infcx.tcx.ty_error();
}
(opaque_type_key, (hidden_type, decl.origin))
})
.collect();
MirTypeckResults { constraints, universal_region_relations, opaque_type_values }
}
fn translate_outlives_facts(typeck: &mut TypeChecker<'_, '_>) {
@ -1911,7 +1878,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
&Rvalue::NullaryOp(_, ty) => {
&Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, ty) => {
let trait_ref = ty::TraitRef {
def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
substs: tcx.mk_substs_trait(ty, &[]),

View file

@ -94,7 +94,7 @@ mod platform {
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
&mut *(ptr as *mut Header).offset(-1)
&mut *(ptr as *mut Header).sub(1)
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
let aligned = ptr.add(align - (ptr as usize & (align - 1)));

View file

@ -156,7 +156,7 @@ mod platform {
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
&mut *(ptr as *mut Header).offset(-1)
&mut *(ptr as *mut Header).sub(1)
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
let aligned = ptr.add(align - (ptr as usize & (align - 1)));

View file

@ -128,7 +128,7 @@
//! #### Unsizing Casts
//! A subtle way of introducing neighbor edges is by casting to a trait object.
//! Since the resulting fat-pointer contains a reference to a vtable, we need to
//! instantiate all object-save methods of the trait, as we need to store
//! instantiate all object-safe methods of the trait, as we need to store
//! pointers to these functions even if they never get called anywhere. This can
//! be seen as a special case of taking a function reference.
//!
@ -1044,10 +1044,12 @@ fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) ->
/// them.
///
/// For example, the source type might be `&SomeStruct` and the target type
/// might be `&SomeTrait` in a cast like:
/// might be `&dyn SomeTrait` in a cast like:
///
/// ```rust,ignore (not real code)
/// let src: &SomeStruct = ...;
/// let target = src as &SomeTrait;
/// let target = src as &dyn SomeTrait;
/// ```
///
/// Then the output of this function would be (SomeStruct, SomeTrait) since for
/// constructing the `target` fat-pointer we need the vtable for that pair.
@ -1068,8 +1070,10 @@ fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) ->
/// for the pair of `T` (which is a trait) and the concrete type that `T` was
/// originally coerced from:
///
/// ```rust,ignore (not real code)
/// let src: &ComplexStruct<SomeStruct> = ...;
/// let target = src as &ComplexStruct<SomeTrait>;
/// let target = src as &ComplexStruct<dyn SomeTrait>;
/// ```
///
/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair
/// `(SomeStruct, SomeTrait)`.
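
For context, the cast described above compiles as-is once the types are declared; a minimal, self-contained version (the `hello` method is added here purely for illustration):

```rust
trait SomeTrait {
    fn hello(&self) {}
}
struct SomeStruct;
impl SomeTrait for SomeStruct {}

fn main() {
    let src: &SomeStruct = &SomeStruct;
    // The unsizing cast: the resulting fat pointer carries a reference to
    // the vtable for the pair (SomeStruct, SomeTrait).
    let target: &dyn SomeTrait = src;
    target.hello();
}
```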

View file

@ -273,7 +273,7 @@ impl<D: Decoder, T: Decodable<D>> Decodable<D> for Vec<T> {
unsafe {
let ptr: *mut T = vec.as_mut_ptr();
for i in 0..len {
std::ptr::write(ptr.offset(i as isize), Decodable::decode(d));
std::ptr::write(ptr.add(i), Decodable::decode(d));
}
vec.set_len(len);
}

View file

@ -7,6 +7,8 @@
result_into_ok_or_err
)]
#![allow(dead_code, unused_variables)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;

View file

@ -15,7 +15,7 @@ fn allocate_zeroed() {
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
i = i.offset(1);
i = i.add(1);
}
Global.deallocate(ptr.as_non_null_ptr(), layout);
}

View file

@ -2447,8 +2447,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
let mut right_offset = 0;
for i in left_edge..right_edge {
right_offset = (i - left_edge) % (cap - right_edge);
let src: isize = (right_edge + right_offset) as isize;
ptr::swap(buf.add(i), buf.offset(src));
let src = right_edge + right_offset;
ptr::swap(buf.add(i), buf.add(src));
}
let n_ops = right_edge - left_edge;
left_edge += n_ops;

View file

@ -436,9 +436,9 @@ impl CString {
///
/// unsafe {
/// assert_eq!(b'f', *ptr as u8);
/// assert_eq!(b'o', *ptr.offset(1) as u8);
/// assert_eq!(b'o', *ptr.offset(2) as u8);
/// assert_eq!(b'\0', *ptr.offset(3) as u8);
/// assert_eq!(b'o', *ptr.add(1) as u8);
/// assert_eq!(b'o', *ptr.add(2) as u8);
/// assert_eq!(b'\0', *ptr.add(3) as u8);
///
/// // retake pointer to free memory
/// let _ = CString::from_raw(ptr);

View file

@ -1024,7 +1024,7 @@ where
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
unsafe {
let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
@ -1038,12 +1038,12 @@ where
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
*ptr = unsafe { ptr.offset(1) };
*ptr = unsafe { ptr.add(1) };
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
*ptr = unsafe { ptr.offset(-1) };
*ptr = unsafe { ptr.sub(1) };
*ptr
}

View file

@ -267,7 +267,7 @@ where
// one slot in the underlying storage will have been freed up and we can immediately
// write back the result.
unsafe {
let dst = dst_buf.offset(i as isize);
let dst = dst_buf.add(i);
debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
ptr::write(dst, self.__iterator_get_unchecked(i));
// Since this executes user code which can panic we have to bump the pointer

View file

@ -160,7 +160,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
self.ptr = unsafe { self.ptr.offset(1) };
self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
@ -272,7 +272,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
self.end = unsafe { self.end.offset(-1) };
self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
@ -288,7 +288,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
} else {
// SAFETY: same as for advance_by()
self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()

View file

@ -542,8 +542,8 @@ impl<T> Vec<T> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
/// for i in 0..len as isize {
/// ptr::write(p.offset(i), 4 + i);
/// for i in 0..len {
/// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@ -702,8 +702,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
/// for i in 0..len as isize {
/// ptr::write(p.offset(i), 4 + i);
/// for i in 0..len {
/// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@ -1393,7 +1393,7 @@ impl<T, A: Allocator> Vec<T, A> {
if index < len {
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
ptr::copy(p, p.offset(1), len - index);
ptr::copy(p, p.add(1), len - index);
} else if index == len {
// No elements need shifting.
} else {
@ -1455,7 +1455,7 @@ impl<T, A: Allocator> Vec<T, A> {
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
ptr::copy(ptr.offset(1), ptr, len - index - 1);
ptr::copy(ptr.add(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@ -2408,7 +2408,7 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
ptr = ptr.offset(1);
ptr = ptr.add(1);
// Increment the length in every step in case next() panics
local_len.increment_len(1);
}

View file

@ -39,7 +39,7 @@ where
let mut local_len = SetLenOnDrop::new(&mut self.len);
iterator.for_each(move |element| {
ptr::write(ptr, element);
ptr = ptr.offset(1);
ptr = ptr.add(1);
// Since the loop executes user code which can panic we have to bump the pointer
// after each step.
// NB can't overflow since we would have had to alloc the address space

View file

@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
fn test_as_ptr() {
let buf = "hello".as_ptr();
unsafe {
assert_eq!(*buf.offset(0), b'h');
assert_eq!(*buf.offset(1), b'e');
assert_eq!(*buf.offset(2), b'l');
assert_eq!(*buf.offset(3), b'l');
assert_eq!(*buf.offset(4), b'o');
assert_eq!(*buf.add(0), b'h');
assert_eq!(*buf.add(1), b'e');
assert_eq!(*buf.add(2), b'l');
assert_eq!(*buf.add(3), b'l');
assert_eq!(*buf.add(4), b'o');
}
}

View file

@ -2209,9 +2209,9 @@ pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -
/// dst.reserve(src_len);
///
/// unsafe {
/// // The call to offset is always safe because `Vec` will never
/// // The call to add is always safe because `Vec` will never
/// // allocate more than `isize::MAX` bytes.
/// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize);
/// let dst_ptr = dst.as_mut_ptr().add(dst_len);
/// let src_ptr = src.as_ptr();
///
/// // Truncate `src` without dropping its contents. We do this first,

View file

@ -1282,15 +1282,14 @@ impl f32 {
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "clamp", since = "1.50.0")]
#[inline]
pub fn clamp(self, min: f32, max: f32) -> f32 {
pub fn clamp(mut self, min: f32, max: f32) -> f32 {
assert!(min <= max);
let mut x = self;
if x < min {
x = min;
if self < min {
self = min;
}
if x > max {
x = max;
if self > max {
self = max;
}
x
self
}
}

View file

@ -1280,15 +1280,14 @@ impl f64 {
#[must_use = "method returns a new number and does not mutate the original value"]
#[stable(feature = "clamp", since = "1.50.0")]
#[inline]
pub fn clamp(self, min: f64, max: f64) -> f64 {
pub fn clamp(mut self, min: f64, max: f64) -> f64 {
assert!(min <= max);
let mut x = self;
if x < min {
x = min;
if self < min {
self = min;
}
if x > max {
x = max;
if self > max {
self = max;
}
x
self
}
}
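
A quick sketch of the `clamp` semantics the rewrite preserves: out-of-range inputs saturate at the bounds, and a NaN input fails both comparisons and so propagates:

```rust
fn main() {
    assert_eq!((-3.0f32).clamp(-2.0, 1.0), -2.0);
    assert_eq!((0.5f32).clamp(-2.0, 1.0), 0.5);
    assert_eq!((7.0f32).clamp(-2.0, 1.0), 1.0);
    // NaN is neither less than `min` nor greater than `max`, so it passes through.
    assert!(f32::NAN.clamp(-2.0, 1.0).is_nan());
}
```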

View file

@ -674,8 +674,9 @@ impl<T> [T] {
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_reverse", issue = "100784")]
#[inline]
pub fn reverse(&mut self) {
pub const fn reverse(&mut self) {
let half_len = self.len() / 2;
let Range { start, end } = self.as_mut_ptr_range();
@ -698,9 +699,9 @@ impl<T> [T] {
revswap(front_half, back_half, half_len);
#[inline]
fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
debug_assert_eq!(a.len(), n);
debug_assert_eq!(b.len(), n);
const fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
debug_assert!(a.len() == n);
debug_assert!(b.len() == n);
// Because this function is first compiled in isolation,
// this check tells LLVM that the indexing below is
@ -708,8 +709,10 @@ impl<T> [T] {
// lengths of the slices are known -- it's removed.
let (a, b) = (&mut a[..n], &mut b[..n]);
for i in 0..n {
let mut i = 0;
while i < n {
mem::swap(&mut a[i], &mut b[n - 1 - i]);
i += 1;
}
}
}
@ -2921,7 +2924,7 @@ impl<T> [T] {
let prev_ptr_write = ptr.add(next_write - 1);
if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
if next_read != next_write {
let ptr_write = prev_ptr_write.offset(1);
let ptr_write = prev_ptr_write.add(1);
mem::swap(&mut *ptr_read, &mut *ptr_write);
}
next_write += 1;
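
The `for i in 0..n` → `while` rewrite above is what makes `revswap` a `const fn`: `for` loops desugar to `Iterator::next`, which cannot be called in const contexts. A stand-alone sketch of the same pattern (hypothetical function, not from the diff):

```rust
const fn sum(xs: &[u32]) -> u32 {
    let mut total = 0;
    let mut i = 0;
    // `for x in xs` would not compile here: iterators are not const-callable.
    while i < xs.len() {
        total += xs[i];
        i += 1;
    }
    total
}

fn main() {
    const TOTAL: u32 = sum(&[1, 2, 3]); // evaluated at compile time
    assert_eq!(TOTAL, 6);
}
```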

View file

@ -326,8 +326,8 @@ where
unsafe {
// Branchless comparison.
*end_l = i as u8;
end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
elem = elem.offset(1);
end_l = end_l.add(!is_less(&*elem, pivot) as usize);
elem = elem.add(1);
}
}
}
@ -352,9 +352,9 @@ where
// Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
unsafe {
// Branchless comparison.
elem = elem.offset(-1);
elem = elem.sub(1);
*end_r = i as u8;
end_r = end_r.offset(is_less(&*elem, pivot) as isize);
end_r = end_r.add(is_less(&*elem, pivot) as usize);
}
}
}
@ -365,12 +365,12 @@ where
if count > 0 {
macro_rules! left {
() => {
l.offset(*start_l as isize)
l.add(*start_l as usize)
};
}
macro_rules! right {
() => {
r.offset(-(*start_r as isize) - 1)
r.sub((*start_r as usize) + 1)
};
}
@ -398,16 +398,16 @@ where
ptr::copy_nonoverlapping(right!(), left!(), 1);
for _ in 1..count {
start_l = start_l.offset(1);
start_l = start_l.add(1);
ptr::copy_nonoverlapping(left!(), right!(), 1);
start_r = start_r.offset(1);
start_r = start_r.add(1);
ptr::copy_nonoverlapping(right!(), left!(), 1);
}
ptr::copy_nonoverlapping(&tmp, right!(), 1);
mem::forget(tmp);
start_l = start_l.offset(1);
start_r = start_r.offset(1);
start_l = start_l.add(1);
start_r = start_r.add(1);
}
}
@ -420,7 +420,7 @@ where
// safe. Otherwise, the debug assertions in the `is_done` case guarantee that
// `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
// for the smaller number of remaining elements.
l = unsafe { l.offset(block_l as isize) };
l = unsafe { l.add(block_l) };
}
if start_r == end_r {
@ -428,7 +428,7 @@ where
// SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
// or `block_r` has been adjusted for the last handful of elements.
r = unsafe { r.offset(-(block_r as isize)) };
r = unsafe { r.sub(block_r) };
}
if is_done {
@ -457,9 +457,9 @@ where
// - `offsets_l` contains valid offsets into `v` collected during the partitioning of
// the last block, so the `l.offset` calls are valid.
unsafe {
end_l = end_l.offset(-1);
ptr::swap(l.offset(*end_l as isize), r.offset(-1));
r = r.offset(-1);
end_l = end_l.sub(1);
ptr::swap(l.add(*end_l as usize), r.sub(1));
r = r.sub(1);
}
}
width(v.as_mut_ptr(), r)
@ -470,9 +470,9 @@ where
while start_r < end_r {
// SAFETY: See the reasoning in [remaining-elements-safety].
unsafe {
end_r = end_r.offset(-1);
ptr::swap(l, r.offset(-(*end_r as isize) - 1));
l = l.offset(1);
end_r = end_r.sub(1);
ptr::swap(l, r.sub((*end_r as usize) + 1));
l = l.add(1);
}
}
width(v.as_mut_ptr(), l)
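
The partitioning loops above stay branchless after the rewrite: each iteration stores an offset unconditionally, then bumps the write cursor by a comparison result cast to an integer. A safe, index-based sketch of that idea (hypothetical helper, not the library code):

```rust
fn collect_offsets_ge(block: &[i32], pivot: i32) -> Vec<u8> {
    let mut offsets = vec![0u8; block.len()];
    let mut end = 0usize;
    for (i, elem) in block.iter().enumerate() {
        offsets[end] = i as u8;           // unconditional store
        end += (*elem >= pivot) as usize; // branchless advance
    }
    offsets.truncate(end);
    offsets
}

fn main() {
    // Offsets 0 and 2 hold elements >= the pivot.
    assert_eq!(collect_offsets_ge(&[5, 1, 9, 3], 4), vec![0, 2]);
}
```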

View file

@ -216,12 +216,12 @@ pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
// SAFETY: since `align - index` and `ascii_block_size` are
// multiples of `usize_bytes`, `block = ptr.add(index)` is
// always aligned with a `usize` so it's safe to dereference
// both `block` and `block.offset(1)`.
// both `block` and `block.add(1)`.
unsafe {
let block = ptr.add(index) as *const usize;
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
let zv = contains_nonascii(*block.offset(1));
let zv = contains_nonascii(*block.add(1));
if zu || zv {
break;
}
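
The `contains_nonascii` call above checks a whole word at a time; a sketch assuming it masks the high bit of every byte (the constant below is reconstructed, not quoted from the source):

```rust
// 0x80 in every byte of a usize-sized word.
const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; std::mem::size_of::<usize>()]);

const fn contains_nonascii(word: usize) -> bool {
    word & NONASCII_MASK != 0
}

fn main() {
    // (these two asserts assume a 64-bit target, where usize is 8 bytes)
    assert!(!contains_nonascii(usize::from_ne_bytes(*b"ASCIIstr")));
    assert!(contains_nonascii(usize::from_ne_bytes(*b"caf\xC3\xA9!!!")));
}
```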

View file

@ -1554,8 +1554,8 @@ impl<T> AtomicPtr<T> {
/// Offsets the pointer's address by adding `val` *bytes*, returning the
/// previous pointer.
///
/// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
/// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
/// This is equivalent to using [`wrapping_byte_add`] to atomically
/// perform `ptr = ptr.wrapping_byte_add(val)`.
///
/// `fetch_byte_add` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
@ -1565,8 +1565,7 @@ impl<T> AtomicPtr<T> {
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
/// [`wrapping_add`]: pointer::wrapping_add
/// [`cast`]: pointer::cast
/// [`wrapping_byte_add`]: pointer::wrapping_byte_add
///
/// # Examples
///
@ -1591,8 +1590,8 @@ impl<T> AtomicPtr<T> {
/// Offsets the pointer's address by subtracting `val` *bytes*, returning the
/// previous pointer.
///
/// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
/// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
/// This is equivalent to using [`wrapping_byte_sub`] to atomically
/// perform `ptr = ptr.wrapping_byte_sub(val)`.
///
/// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
@ -1602,8 +1601,7 @@ impl<T> AtomicPtr<T> {
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
/// [`wrapping_sub`]: pointer::wrapping_sub
/// [`cast`]: pointer::cast
/// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub
///
/// # Examples
///
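
A usage sketch of the documented byte-offset behaviour (this API was nightly-only behind `strict_provenance_atomic_ptr` at the time of this PR):

```rust
#![feature(strict_provenance_atomic_ptr)] // nightly-only at the time of this PR
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut data = [0u8; 4];
    let atom = AtomicPtr::new(data.as_mut_ptr());
    let old = atom.fetch_byte_add(1, Ordering::Relaxed);
    // The stored pointer's address moved forward by exactly one byte.
    assert_eq!(atom.load(Ordering::Relaxed) as usize, old as usize + 1);
}
```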

View file

@ -42,7 +42,7 @@ pub(crate) unsafe fn android_set_abort_message(payload: *mut &mut dyn BoxMeUp) {
return; // allocation failure
}
copy_nonoverlapping(msg.as_ptr(), buf as *mut u8, msg.len());
buf.offset(msg.len() as isize).write(0);
buf.add(msg.len()).write(0);
let func = transmute::<usize, SetAbortMessageType>(func_addr);
func(buf);

View file

@ -75,7 +75,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
let call_site_encoding = reader.read::<u8>();
let call_site_table_length = reader.read_uleb128();
let action_table = reader.ptr.offset(call_site_table_length as isize);
let action_table = reader.ptr.add(call_site_table_length as usize);
let ip = context.ip;
if !USING_SJLJ_EXCEPTIONS {

View file

@ -329,7 +329,7 @@ impl SocketAddr {
crate::ptr::copy_nonoverlapping(
namespace.as_ptr(),
addr.sun_path.as_mut_ptr().offset(1) as *mut u8,
addr.sun_path.as_mut_ptr().add(1) as *mut u8,
namespace.len(),
);
let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t;

View file

@ -169,15 +169,15 @@ use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner};
pub struct Child {
pub(crate) handle: imp::Process,
/// The handle for writing to the child's standard input (stdin), if it has
/// been captured. To avoid partially moving
/// the `child` and thus blocking yourself from calling
/// functions on `child` while using `stdin`,
/// you might find it helpful:
/// The handle for writing to the child's standard input (stdin), if it
/// has been captured. You might find it helpful to do
///
/// ```compile_fail,E0425
/// let stdin = child.stdin.take().unwrap();
/// ```
///
/// to avoid partially moving the `child` and thus blocking yourself from calling
/// functions on `child` while using `stdin`.
#[stable(feature = "process", since = "1.0.0")]
pub stdin: Option<ChildStdin>,

View file

@ -17,12 +17,12 @@ fn test_copy_to_userspace_function() {
dst.copy_from_enclave(&[0u8; 100]);
// Copy src[0..size] to dst + offset
unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().add(offset), size) };
// Verify copy
for byte in 0..size {
unsafe {
assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
assert_eq!(*dst.as_ptr().add(offset + byte), src[byte as usize]);
}
}
}

View file

@ -168,7 +168,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
// SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned`
// is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before
// it, it is safe to write a header directly before it.
unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) };
unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) };
// SAFETY: The returned pointer does not point to the start of an allocated block,
// but there is a header readable directly before it containing the location of the start
@ -213,7 +213,7 @@ unsafe impl GlobalAlloc for System {
// SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null
// and have a header readable directly before it.
unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 }
unsafe { ptr::read((ptr as *mut Header).sub(1)).0 }
}
};
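
These hunks use the same "header stored directly before the aligned pointer" trick as the `get_header` changes earlier in this rollup; a minimal round-trip sketch (names hypothetical, and it assumes the allocator left aligned room for the header):

```rust
use std::ptr;

#[repr(C)]
struct Header(*mut u8);

/// Record the original allocation pointer in the slot directly before `aligned`.
unsafe fn write_header(aligned: *mut u8, original: *mut u8) {
    ptr::write((aligned as *mut Header).sub(1), Header(original));
}

/// Recover the original allocation pointer from that slot.
unsafe fn read_header(aligned: *mut u8) -> *mut u8 {
    ptr::read((aligned as *mut Header).sub(1)).0
}

fn main() {
    // A usize-aligned scratch buffer so the Header write is properly aligned.
    let mut buf = [0usize; 4];
    let base = buf.as_mut_ptr() as *mut u8;
    unsafe {
        // Pretend `base + 16` is the aligned pointer handed out to the user.
        let aligned = base.add(16);
        write_header(aligned, base);
        assert_eq!(read_header(aligned), base);
    }
}
```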

View file

@ -512,7 +512,7 @@ impl File {
));
}
};
let subst_ptr = path_buffer.offset(subst_off as isize);
let subst_ptr = path_buffer.add(subst_off.into());
let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
// Absolute paths start with an NT internal namespace prefix `\??\`
// We should not let it leak through.
@ -1345,10 +1345,10 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
let v = br"\??\";
let v = v.iter().map(|x| *x as u16);
for c in v.chain(original.as_os_str().encode_wide()) {
*buf.offset(i) = c;
*buf.add(i) = c;
i += 1;
}
*buf.offset(i) = 0;
*buf.add(i) = 0;
i += 1;
(*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
(*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;

View file

@ -99,11 +99,11 @@ impl Iterator for Env {
}
let p = self.cur as *const u16;
let mut len = 0;
while *p.offset(len) != 0 {
while *p.add(len) != 0 {
len += 1;
}
let s = slice::from_raw_parts(p, len as usize);
self.cur = self.cur.offset(len + 1);
let s = slice::from_raw_parts(p, len);
self.cur = self.cur.add(len + 1);
// Windows allows environment variables to start with an equals
// symbol (in any other position, this is the separator between
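
The loop above scans for the terminating NUL of a wide (UTF-16) environment block entry; extracted as a stand-alone sketch (hypothetical helper name):

```rust
/// Length in u16 units of a NUL-terminated wide string.
unsafe fn wide_len(p: *const u16) -> usize {
    let mut len = 0;
    while *p.add(len) != 0 {
        len += 1;
    }
    len
}

fn main() {
    let env_entry: Vec<u16> = "PATH=C:\\bin\0".encode_utf16().collect();
    assert_eq!(unsafe { wide_len(env_entry.as_ptr()) }, 11);
}
```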

View file

@ -111,53 +111,6 @@ fn write_header(out: &mut Buffer, class: &str, extra_content: Option<Buffer>) {
write!(out, "<code>");
}
/// Write all the pending elements sharing a same (or at mergeable) `Class`.
///
/// If there is a "parent" (if a `EnterSpan` event was encountered) and the parent can be merged
/// with the elements' class, then we simply write the elements since the `ExitSpan` event will
/// close the tag.
///
/// Otherwise, if there is only one pending element, we let the `string` function handle both
/// opening and closing the tag, otherwise we do it into this function.
fn write_pending_elems(
out: &mut Buffer,
href_context: &Option<HrefContext<'_, '_, '_>>,
pending_elems: &mut Vec<(&str, Option<Class>)>,
current_class: &mut Option<Class>,
closing_tags: &[(&str, Class)],
) {
if pending_elems.is_empty() {
return;
}
let mut done = false;
if let Some((_, parent_class)) = closing_tags.last() {
if can_merge(*current_class, Some(*parent_class), "") {
for (text, class) in pending_elems.iter() {
string(out, Escape(text), *class, &href_context, false);
}
done = true;
}
}
if !done {
// We only want to "open" the tag ourselves if we have more than one pending and if the current
// parent tag is not the same as our pending content.
let open_tag_ourselves = pending_elems.len() > 1;
let close_tag = if open_tag_ourselves {
enter_span(out, current_class.unwrap(), &href_context)
} else {
""
};
for (text, class) in pending_elems.iter() {
string(out, Escape(text), *class, &href_context, !open_tag_ourselves);
}
if open_tag_ourselves {
exit_span(out, close_tag);
}
}
pending_elems.clear();
*current_class = None;
}
/// Check if two `Class` can be merged together. In the following rules, "unclassified" means `None`
/// basically (since it's `Option<Class>`). The following rules apply:
///
@ -171,7 +124,88 @@ fn can_merge(class1: Option<Class>, class2: Option<Class>, text: &str) -> bool {
(Some(c1), Some(c2)) => c1.is_equal_to(c2),
(Some(Class::Ident(_)), None) | (None, Some(Class::Ident(_))) => true,
(Some(_), None) | (None, Some(_)) => text.trim().is_empty(),
_ => false,
(None, None) => true,
}
}
/// This type is used as a convenience to avoid having to pass all its fields as arguments into
/// the various functions (which became its methods).
struct TokenHandler<'a, 'b, 'c, 'd, 'e> {
out: &'a mut Buffer,
/// It contains the closing tag and the associated `Class`.
closing_tags: Vec<(&'static str, Class)>,
/// This is used because we don't automatically generate the closing tag on `ExitSpan` in
/// case an `EnterSpan` event with the same class follows.
pending_exit_span: Option<Class>,
/// `current_class` and `pending_elems` are used to group HTML elements with same `class`
/// attributes to reduce the DOM size.
current_class: Option<Class>,
/// We need to keep the `Class` for each element because it could contain a `Span` which is
/// used to generate links.
pending_elems: Vec<(&'b str, Option<Class>)>,
href_context: Option<HrefContext<'c, 'd, 'e>>,
}
impl<'a, 'b, 'c, 'd, 'e> TokenHandler<'a, 'b, 'c, 'd, 'e> {
fn handle_exit_span(&mut self) {
// We can't get the last `closing_tags` element using `pop()` because `closing_tags` is
// being used in `write_pending_elems`.
let class = self.closing_tags.last().expect("ExitSpan without EnterSpan").1;
// We flush everything just in case...
self.write_pending_elems(Some(class));
exit_span(self.out, self.closing_tags.pop().expect("ExitSpan without EnterSpan").0);
self.pending_exit_span = None;
}
/// Write all the pending elements sharing the same (or a mergeable) `Class`.
///
/// If there is a "parent" (i.e., an `EnterSpan` event was encountered) and the parent can be merged
/// with the elements' class, then we simply write the elements since the `ExitSpan` event will
/// close the tag.
///
/// Otherwise, if there is only one pending element, we let the `string` function handle both
/// opening and closing the tag; otherwise we do it in this function.
///
/// It returns `true` if `current_class` must be set to `None` afterwards.
fn write_pending_elems(&mut self, current_class: Option<Class>) -> bool {
if self.pending_elems.is_empty() {
return false;
}
if let Some((_, parent_class)) = self.closing_tags.last() &&
can_merge(current_class, Some(*parent_class), "")
{
for (text, class) in self.pending_elems.iter() {
string(self.out, Escape(text), *class, &self.href_context, false);
}
} else {
// We only want to "open" the tag ourselves if we have more than one pending and if the
// current parent tag is not the same as our pending content.
let close_tag = if self.pending_elems.len() > 1 && current_class.is_some() {
Some(enter_span(self.out, current_class.unwrap(), &self.href_context))
} else {
None
};
for (text, class) in self.pending_elems.iter() {
string(self.out, Escape(text), *class, &self.href_context, close_tag.is_none());
}
if let Some(close_tag) = close_tag {
exit_span(self.out, close_tag);
}
}
self.pending_elems.clear();
true
}
}
impl<'a, 'b, 'c, 'd, 'e> Drop for TokenHandler<'a, 'b, 'c, 'd, 'e> {
/// When leaving, we need to flush all pending data so that no content goes missing.
fn drop(&mut self) {
if self.pending_exit_span.is_some() {
self.handle_exit_span();
} else {
self.write_pending_elems(self.current_class);
}
}
}
@ -194,64 +228,72 @@ fn write_code(
) {
// This replace fixes how source code containing DOS line endings (CRLF) is displayed.
let src = src.replace("\r\n", "\n");
// It contains the closing tag and the associated `Class`.
let mut closing_tags: Vec<(&'static str, Class)> = Vec::new();
// The following two variables are used to group HTML elements with same `class` attributes
// to reduce the DOM size.
let mut current_class: Option<Class> = None;
// We need to keep the `Class` for each element because it could contain a `Span` which is
// used to generate links.
let mut pending_elems: Vec<(&str, Option<Class>)> = Vec::new();
let mut token_handler = TokenHandler {
out,
closing_tags: Vec::new(),
pending_exit_span: None,
current_class: None,
pending_elems: Vec::new(),
href_context,
};
Classifier::new(
&src,
href_context.as_ref().map(|c| c.file_span).unwrap_or(DUMMY_SP),
token_handler.href_context.as_ref().map(|c| c.file_span).unwrap_or(DUMMY_SP),
decoration_info,
)
.highlight(&mut |highlight| {
match highlight {
Highlight::Token { text, class } => {
// If we received a `ExitSpan` event and then have a non-compatible `Class`, we
// need to close the `<span>`.
let need_current_class_update = if let Some(pending) = token_handler.pending_exit_span &&
!can_merge(Some(pending), class, text) {
token_handler.handle_exit_span();
true
// If the two `Class` are different, time to flush the current content and start
// a new one.
if !can_merge(current_class, class, text) {
write_pending_elems(
out,
&href_context,
&mut pending_elems,
&mut current_class,
&closing_tags,
);
current_class = class.map(Class::dummy);
} else if current_class.is_none() {
current_class = class.map(Class::dummy);
} else if !can_merge(token_handler.current_class, class, text) {
token_handler.write_pending_elems(token_handler.current_class);
true
} else {
token_handler.current_class.is_none()
};
if need_current_class_update {
token_handler.current_class = class.map(Class::dummy);
}
pending_elems.push((text, class));
token_handler.pending_elems.push((text, class));
}
Highlight::EnterSpan { class } => {
// We flush everything just in case...
write_pending_elems(
out,
&href_context,
&mut pending_elems,
&mut current_class,
&closing_tags,
);
closing_tags.push((enter_span(out, class, &href_context), class))
let mut should_add = true;
if let Some(pending_exit_span) = token_handler.pending_exit_span {
if class.is_equal_to(pending_exit_span) {
should_add = false;
} else {
token_handler.handle_exit_span();
}
} else {
// We flush everything just in case...
if token_handler.write_pending_elems(token_handler.current_class) {
token_handler.current_class = None;
}
}
if should_add {
let closing_tag = enter_span(token_handler.out, class, &token_handler.href_context);
token_handler.closing_tags.push((closing_tag, class));
}
token_handler.current_class = None;
token_handler.pending_exit_span = None;
}
Highlight::ExitSpan => {
// We flush everything just in case...
write_pending_elems(
out,
&href_context,
&mut pending_elems,
&mut current_class,
&closing_tags,
);
exit_span(out, closing_tags.pop().expect("ExitSpan without EnterSpan").0)
token_handler.current_class = None;
token_handler.pending_exit_span =
Some(token_handler.closing_tags.last().as_ref().expect("ExitSpan without EnterSpan").1);
}
};
});
write_pending_elems(out, &href_context, &mut pending_elems, &mut current_class, &closing_tags);
}
fn write_footer(out: &mut Buffer, playground_button: Option<&str>) {
@ -291,8 +333,8 @@ impl Class {
match (self, other) {
(Self::Self_(_), Self::Self_(_))
| (Self::Macro(_), Self::Macro(_))
| (Self::Ident(_), Self::Ident(_))
| (Self::Decoration(_), Self::Decoration(_)) => true,
| (Self::Ident(_), Self::Ident(_)) => true,
(Self::Decoration(c1), Self::Decoration(c2)) => c1 == c2,
(x, y) => x == y,
}
}
@ -761,7 +803,7 @@ impl<'a> Classifier<'a> {
TokenKind::CloseBracket => {
if self.in_attribute {
self.in_attribute = false;
sink(Highlight::Token { text: "]", class: Some(Class::Attribute) });
sink(Highlight::Token { text: "]", class: None });
sink(Highlight::ExitSpan);
return;
}
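
The core idea of this refactor: adjacent tokens whose classes can be merged are buffered and flushed as a single `<span>`, shrinking the DOM of source pages. A toy sketch of just that grouping step (hypothetical function, far simpler than the real `TokenHandler`):

```rust
fn merge_spans(tokens: &[(&str, Option<&str>)]) -> String {
    let mut out = String::new();
    let mut i = 0;
    while i < tokens.len() {
        let class = tokens[i].1;
        let mut text = String::new();
        // Group every consecutive token with the same class into one element.
        while i < tokens.len() && tokens[i].1 == class {
            text.push_str(tokens[i].0);
            i += 1;
        }
        match class {
            Some(c) => out.push_str(&format!("<span class=\"{c}\">{text}</span>")),
            None => out.push_str(&text),
        }
    }
    out
}

fn main() {
    let toks = [("let ", Some("kw")), ("mut ", Some("kw")), ("x", None)];
    assert_eq!(merge_spans(&toks), "<span class=\"kw\">let mut </span>x");
}
```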

View file

@ -1,2 +1,4 @@
<span class="example"><span class="kw">let </span>x = <span class="number">1</span>;</span>
<span class="kw">let </span>y = <span class="number">2</span>;
<span class="example"><span class="kw">let </span>x = <span class="number">1</span>;
<span class="kw">let </span>y = <span class="number">2</span>;
</span><span class="example2"><span class="kw">let </span>z = <span class="number">3</span>;
</span><span class="kw">let </span>a = <span class="number">4</span>;

View file

@ -8,12 +8,13 @@
.lifetime { color: #B76514; }
.question-mark { color: #ff9011; }
</style>
<pre><code><span class="attribute">#![crate_type = <span class="string">&quot;lib&quot;</span>]</span>
<pre><code><span class="attribute">#![crate_type = <span class="string">&quot;lib&quot;</span>]
<span class="kw">use </span>std::path::{Path, PathBuf};
</span><span class="kw">use </span>std::path::{Path, PathBuf};
<span class="attribute">#[cfg(target_os = <span class="string">&quot;linux&quot;</span>)]</span>
<span class="kw">fn </span>main() -&gt; () {
<span class="attribute">#[cfg(target_os = <span class="string">&quot;linux&quot;</span>)]
#[cfg(target_os = <span class="string">&quot;windows&quot;</span>)]
</span><span class="kw">fn </span>main() -&gt; () {
<span class="kw">let </span>foo = <span class="bool-val">true </span>&amp;&amp; <span class="bool-val">false </span>|| <span class="bool-val">true</span>;
<span class="kw">let _</span>: <span class="kw-2">*const </span>() = <span class="number">0</span>;
<span class="kw">let _ </span>= <span class="kw-2">&amp;</span>foo;
@ -22,8 +23,8 @@
<span class="macro">mac!</span>(foo, <span class="kw-2">&amp;mut </span>bar);
<span class="macro">assert!</span>(<span class="self">self</span>.length &lt; N &amp;&amp; index &lt;= <span class="self">self</span>.length);
::std::env::var(<span class="string">&quot;gateau&quot;</span>).is_ok();
<span class="attribute">#[rustfmt::skip]</span>
<span class="kw">let </span>s:std::path::PathBuf = std::path::PathBuf::new();
<span class="attribute">#[rustfmt::skip]
</span><span class="kw">let </span>s:std::path::PathBuf = std::path::PathBuf::new();
<span class="kw">let </span><span class="kw-2">mut </span>s = String::new();
<span class="kw">match </span><span class="kw-2">&amp;</span>s {
@ -31,7 +32,7 @@
}
}
<span class="macro">macro_rules!</span> bar {
<span class="macro">macro_rules! </span>bar {
(<span class="macro-nonterminal">$foo</span>:tt) =&gt; {};
}
</code></pre>

View file

@ -3,6 +3,7 @@
use std::path::{Path, PathBuf};
#[cfg(target_os = "linux")]
#[cfg(target_os = "windows")]
fn main() -> () {
let foo = true && false || true;
let _: *const () = 0;

View file

@ -69,9 +69,12 @@ fn test_union_highlighting() {
fn test_decorations() {
create_default_session_globals_then(|| {
let src = "let x = 1;
let y = 2;";
let y = 2;
let z = 3;
let a = 4;";
let mut decorations = FxHashMap::default();
decorations.insert("example", vec![(0, 10)]);
decorations.insert("example", vec![(0, 10), (11, 21)]);
decorations.insert("example2", vec![(22, 32)]);
let mut html = Buffer::new();
write_code(&mut html, src, None, Some(DecorationInfo(decorations)));

View file

@ -0,0 +1,25 @@
// Floating-point clamp is designed to be implementable as max+min,
// so check to make sure that's what it's actually emitting.
// assembly-output: emit-asm
// compile-flags: --crate-type=lib -O -C llvm-args=-x86-asm-syntax=intel
// only-x86_64
// CHECK-LABEL: clamp_demo:
#[no_mangle]
pub fn clamp_demo(a: f32, x: f32, y: f32) -> f32 {
// CHECK: maxss
// CHECK: minss
a.clamp(x, y)
}
// CHECK-LABEL: clamp12_demo:
#[no_mangle]
pub fn clamp12_demo(a: f32) -> f32 {
// CHECK: movss xmm1
// CHECK-NEXT: maxss xmm1, xmm0
// CHECK-NEXT: movss xmm0
// CHECK-NEXT: minss xmm0, xmm1
// CHECK: ret
a.clamp(1.0, 2.0)
}

View file

@ -13,4 +13,4 @@ pub fn call_foreign_fn() -> u8 {
// CHECK: declare zeroext i8 @foreign_fn()
extern "C" {fn foreign_fn() -> u8;}
// CHECK: !{i32 7, !"PIC Level", i32 2}
// CHECK: !{i32 {{[78]}}, !"PIC Level", i32 2}

View file

@ -18,5 +18,5 @@ pub fn call_foreign_fn() -> u8 {
// CHECK: declare zeroext i8 @foreign_fn()
extern "C" {fn foreign_fn() -> u8;}
// CHECK: !{i32 7, !"PIC Level", i32 2}
// CHECK: !{i32 {{[78]}}, !"PIC Level", i32 2}
// CHECK: !{i32 7, !"PIE Level", i32 2}

View file

@ -1,5 +1,5 @@
<code># single
## double
### triple
<span class="attribute">#[outer]</span>
<span class="attribute">#![inner]</span></code>
<span class="attribute">#[outer]
#![inner]</span></code>

View file

@ -1,8 +1,10 @@
// @has issue_41783/struct.Foo.html
// @!hasraw - 'space'
// @!hasraw - 'comment'
// @hasraw - '<span class="attribute">#[outer]</span>'
// @hasraw - '<span class="attribute">#![inner]</span>'
// @hasraw - '<span class="attribute">#[outer]'
// @!hasraw - '<span class="attribute">#[outer]</span>'
// @hasraw - '#![inner]</span>'
// @!hasraw - '<span class="attribute">#![inner]</span>'
// @snapshot 'codeblock' - '//*[@class="rustdoc-toggle top-doc"]/*[@class="docblock"]//pre/code'
/// ```no_run