
Auto merge of #58691 - Centril:rollup, r=Centril

Rollup of 6 pull requests

Successful merges:

 - #57364 (Improve parsing diagnostic for negative supertrait bounds)
 - #58183 (Clarify guarantees for `Box` allocation)
 - #58442 (Simplify the unix `Weak` functionality)
 - #58454 (Refactor Windows stdio and remove stdin double buffering)
 - #58511 (Const to op simplification)
 - #58642 (rustdoc: support methods on primitives in intra-doc links)

Failed merges:

r? @ghost
bors 2019-02-24 06:59:13 +00:00
commit e17c48e2f2
32 changed files with 584 additions and 399 deletions


@ -34,6 +34,9 @@ extern "Rust" {
/// This type implements the [`Alloc`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// Note: while this type is unstable, the functionality it provides can be
/// accessed through the [free functions in `alloc`](index.html#functions).
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Copy, Clone, Default, Debug)]
pub struct Global;

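As a side note to the doc change above: the free functions that the new note points readers to are already stable, so a minimal, self-contained sketch (not part of this diff) of using `std::alloc` directly might look like this. Error handling is reduced to an assert for brevity.

```rust
use std::alloc::{alloc, dealloc, Layout};

fn main() {
    let layout = Layout::new::<u32>();
    unsafe {
        // `alloc` signals allocation failure with a null pointer.
        let p = alloc(layout) as *mut u32;
        assert!(!p.is_null());
        p.write(42);
        assert_eq!(*p, 42);
        dealloc(p as *mut u8, layout);
    }
}
```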

@ -4,6 +4,16 @@
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
//! its allocation. It is valid to convert both ways between a [`Box`] and a
//! raw pointer allocated with the [`Global`] allocator, given that the
//! [`Layout`] used with the allocator is correct for the type. More precisely,
//! a `value: *mut T` that has been allocated with the [`Global`] allocator
//! with `Layout::for_value(&*value)` may be converted into a box using
//! `Box::<T>::from_raw(value)`. Conversely, the memory backing a `value: *mut
//! T` obtained from `Box::<T>::into_raw` may be deallocated using the
//! [`Global`] allocator with `Layout::for_value(&*value)`.
//!
//! # Examples
//!
//! Move a value from the stack to the heap by creating a [`Box`]:

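A minimal sketch (not part of this diff) of the `into_raw`/`from_raw` round trip that the new paragraph documents:

```rust
fn main() {
    let b = Box::new(String::from("hello"));
    // Hand ownership over to a raw pointer (e.g., to cross an FFI boundary)...
    let raw: *mut String = Box::into_raw(b);
    // ...and reclaim it later; dropping the rebuilt Box frees the allocation as usual.
    let b = unsafe { Box::from_raw(raw) };
    assert_eq!(*b, "hello");
}
```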

@ -312,7 +312,7 @@ impl_stable_hash_for!(
impl<'tcx> for enum mir::interpret::ConstValue<'tcx> [ mir::interpret::ConstValue ] {
Scalar(val),
Slice(a, b),
ByRef(id, alloc, offset),
ByRef(ptr, alloc),
}
);
impl_stable_hash_for!(struct crate::mir::interpret::RawConst<'tcx> {


@ -31,9 +31,9 @@ pub enum ConstValue<'tcx> {
/// it.
Slice(Scalar, u64),
/// An allocation together with an offset into the allocation.
/// Invariant: the `AllocId` matches the allocation.
ByRef(AllocId, &'tcx Allocation, Size),
/// An allocation together with a pointer into the allocation.
/// Invariant: the pointer's `AllocId` resolves to the allocation.
ByRef(Pointer, &'tcx Allocation),
}
#[cfg(target_arch = "x86_64")]


@ -505,8 +505,8 @@ impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
match *self {
ConstValue::Scalar(x) => Some(ConstValue::Scalar(x)),
ConstValue::Slice(x, y) => Some(ConstValue::Slice(x, y)),
ConstValue::ByRef(x, alloc, z) => Some(ConstValue::ByRef(
x, alloc.lift_to_tcx(tcx)?, z,
ConstValue::ByRef(ptr, alloc) => Some(ConstValue::ByRef(
ptr, alloc.lift_to_tcx(tcx)?,
)),
}
}


@ -71,7 +71,7 @@ pub fn codegen_static_initializer(
let static_ = cx.tcx.const_eval(param_env.and(cid))?;
let alloc = match static_.val {
ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc,
ConstValue::ByRef(ptr, alloc) if ptr.offset.bytes() == 0 => alloc,
_ => bug!("static const eval returned {:#?}", static_),
};
Ok((const_alloc_to_llvm(cx, alloc), alloc))


@ -101,8 +101,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
let b_llval = bx.cx().const_usize(b);
OperandValue::Pair(a_llval, b_llval)
},
ConstValue::ByRef(_, alloc, offset) => {
return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, offset)));
ConstValue::ByRef(ptr, alloc) => {
return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, ptr.offset)));
},
};


@ -417,8 +417,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let layout = cx.layout_of(self.monomorphize(&ty));
match bx.tcx().const_eval(param_env.and(cid)) {
Ok(val) => match val.val {
mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
bx.cx().from_const_alloc(layout, alloc, offset)
mir::interpret::ConstValue::ByRef(ptr, alloc) => {
bx.cx().from_const_alloc(layout, alloc, ptr.offset)
}
_ => bug!("promoteds should have an allocation: {:?}", val),
},


@ -21,7 +21,7 @@ use syntax::ast::Mutability;
use syntax::source_map::{Span, DUMMY_SP};
use crate::interpret::{self,
PlaceTy, MPlaceTy, MemPlace, OpTy, ImmTy, Operand, Immediate, Scalar, Pointer,
PlaceTy, MPlaceTy, MemPlace, OpTy, ImmTy, Immediate, Scalar, Pointer,
RawConst, ConstValue,
EvalResult, EvalError, EvalErrorKind, GlobalId, EvalContext, StackPopCleanup,
Allocation, AllocId, MemoryKind,
@ -62,45 +62,46 @@ pub(crate) fn eval_promoted<'a, 'mir, 'tcx>(
eval_body_using_ecx(&mut ecx, cid, Some(mir), param_env)
}
// FIXME: These two conversion functions are bad hacks. We should just always use allocations.
pub fn op_to_const<'tcx>(
fn mplace_to_const<'tcx>(
ecx: &CompileTimeEvalContext<'_, '_, 'tcx>,
mplace: MPlaceTy<'tcx>,
) -> EvalResult<'tcx, ty::Const<'tcx>> {
let MemPlace { ptr, align, meta } = *mplace;
// extract alloc-offset pair
assert!(meta.is_none());
let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align >= align);
assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= mplace.layout.size.bytes());
let mut alloc = alloc.clone();
alloc.align = align;
// FIXME shouldn't it be the case that `mark_static_initialized` has already
// interned this? I thought that is the entire point of that `FinishStatic` stuff?
let alloc = ecx.tcx.intern_const_alloc(alloc);
let val = ConstValue::ByRef(ptr, alloc);
Ok(ty::Const { val, ty: mplace.layout.ty })
}
fn op_to_const<'tcx>(
ecx: &CompileTimeEvalContext<'_, '_, 'tcx>,
op: OpTy<'tcx>,
may_normalize: bool,
) -> EvalResult<'tcx, ty::Const<'tcx>> {
// We do not normalize just any data. Only scalar layout and slices.
let normalize = may_normalize
&& match op.layout.abi {
layout::Abi::Scalar(..) => true,
layout::Abi::ScalarPair(..) => op.layout.ty.is_slice(),
_ => false,
};
let normalize = match op.layout.abi {
layout::Abi::Scalar(..) => true,
layout::Abi::ScalarPair(..) => op.layout.ty.is_slice(),
_ => false,
};
let normalized_op = if normalize {
ecx.try_read_immediate(op)?
Err(*ecx.read_immediate(op).expect("normalization works on validated constants"))
} else {
match *op {
Operand::Indirect(mplace) => Err(mplace),
Operand::Immediate(val) => Ok(val)
}
op.try_as_mplace()
};
let val = match normalized_op {
Err(MemPlace { ptr, align, meta }) => {
// extract alloc-offset pair
assert!(meta.is_none());
let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align >= align);
assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes());
let mut alloc = alloc.clone();
alloc.align = align;
// FIXME shouldn't it be the case that `mark_static_initialized` has already
// interned this? I thought that is the entire point of that `FinishStatic` stuff?
let alloc = ecx.tcx.intern_const_alloc(alloc);
ConstValue::ByRef(ptr.alloc_id, alloc, ptr.offset)
},
Ok(Immediate::Scalar(x)) =>
Ok(mplace) => return mplace_to_const(ecx, mplace),
Err(Immediate::Scalar(x)) =>
ConstValue::Scalar(x.not_undef()?),
Ok(Immediate::ScalarPair(a, b)) =>
Err(Immediate::ScalarPair(a, b)) =>
ConstValue::Slice(a.not_undef()?, b.to_usize(ecx)?),
};
Ok(ty::Const { val, ty: op.layout.ty })
@ -476,7 +477,7 @@ pub fn const_field<'a, 'tcx>(
let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env);
let result = (|| {
// get the operand again
let op = ecx.lazy_const_to_op(ty::LazyConst::Evaluated(value), value.ty)?;
let op = ecx.const_to_op(value, None)?;
// downcast
let down = match variant {
None => op,
@ -486,7 +487,7 @@ pub fn const_field<'a, 'tcx>(
let field = ecx.operand_field(down, field.index() as u64)?;
// and finally move back to the const world, always normalizing because
// this is not called for statics.
op_to_const(&ecx, field, true)
op_to_const(&ecx, field)
})();
result.map_err(|error| {
let err = error_to_const_error(&ecx, error);
@ -502,7 +503,7 @@ pub fn const_variant_index<'a, 'tcx>(
) -> EvalResult<'tcx, VariantIdx> {
trace!("const_variant_index: {:?}", val);
let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env);
let op = ecx.lazy_const_to_op(ty::LazyConst::Evaluated(val), val.ty)?;
let op = ecx.const_to_op(val, None)?;
Ok(ecx.read_discriminant(op)?.1)
}
@ -523,13 +524,11 @@ fn validate_and_turn_into_const<'a, 'tcx>(
let cid = key.value;
let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env);
let val = (|| {
let op = ecx.raw_const_to_mplace(constant)?.into();
// FIXME: Once the visitor infrastructure landed, change validation to
// work directly on `MPlaceTy`.
let mut ref_tracking = RefTracking::new(op);
while let Some((op, path)) = ref_tracking.todo.pop() {
let mplace = ecx.raw_const_to_mplace(constant)?;
let mut ref_tracking = RefTracking::new(mplace);
while let Some((mplace, path)) = ref_tracking.todo.pop() {
ecx.validate_operand(
op,
mplace.into(),
path,
Some(&mut ref_tracking),
true, // const mode
@ -537,8 +536,11 @@ fn validate_and_turn_into_const<'a, 'tcx>(
}
// Now that we validated, turn this into a proper constant.
let def_id = cid.instance.def.def_id();
let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none();
op_to_const(&ecx, op, normalize)
if tcx.is_static(def_id).is_some() || cid.promoted.is_some() {
mplace_to_const(&ecx, mplace)
} else {
op_to_const(&ecx, mplace.into())
}
})();
val.map_err(|error| {


@ -172,7 +172,7 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Const};
use rustc::ty::layout::{Integer, IntegerExt, VariantIdx, Size};
use rustc::mir::Field;
use rustc::mir::interpret::{ConstValue, Pointer, Scalar};
use rustc::mir::interpret::{ConstValue, Scalar};
use rustc::util::common::ErrorReported;
use syntax::attr::{SignedInt, UnsignedInt};
@ -214,9 +214,8 @@ impl<'a, 'tcx> LiteralExpander<'a, 'tcx> {
match (val, &crty.sty, &rty.sty) {
// the easy case, deref a reference
(ConstValue::Scalar(Scalar::Ptr(p)), x, y) if x == y => ConstValue::ByRef(
p.alloc_id,
p,
self.tcx.alloc_map.lock().unwrap_memory(p.alloc_id),
p.offset,
),
// unsize array to slice if pattern is array but match value or other patterns are slice
(ConstValue::Scalar(Scalar::Ptr(p)), ty::Array(t, n), ty::Slice(u)) => {
@ -1428,7 +1427,7 @@ fn slice_pat_covered_by_const<'tcx>(
suffix: &[Pattern<'tcx>]
) -> Result<bool, ErrorReported> {
let data: &[u8] = match (const_val.val, &const_val.ty.sty) {
(ConstValue::ByRef(id, alloc, offset), ty::Array(t, n)) => {
(ConstValue::ByRef(ptr, alloc), ty::Array(t, n)) => {
if *t != tcx.types.u8 {
// FIXME(oli-obk): can't mix const patterns with slice patterns and get
// any sort of exhaustiveness/unreachable check yet
@ -1436,7 +1435,6 @@ fn slice_pat_covered_by_const<'tcx>(
// are definitely unreachable.
return Ok(false);
}
let ptr = Pointer::new(id, offset);
let n = n.assert_usize(tcx).unwrap();
alloc.get_bytes(&tcx, ptr, Size::from_bytes(n)).unwrap()
},
@ -1778,8 +1776,8 @@ fn specialize<'p, 'a: 'p, 'tcx: 'a>(
let (opt_ptr, n, ty) = match value.ty.sty {
ty::TyKind::Array(t, n) => {
match value.val {
ConstValue::ByRef(id, alloc, offset) => (
Some((Pointer::new(id, offset), alloc)),
ConstValue::ByRef(ptr, alloc) => (
Some((ptr, alloc)),
n.unwrap_usize(cx.tcx),
t,
),


@ -13,7 +13,7 @@ use rustc::mir::interpret::{
sign_extend, truncate,
};
use super::{
EvalContext, Machine, AllocMap, Allocation, AllocationExtra,
EvalContext, Machine,
MemPlace, MPlaceTy, PlaceTy, Place, MemoryKind,
};
pub use rustc::mir::interpret::ScalarMaybeUndef;
@ -270,7 +270,7 @@ pub(super) fn from_known_layout<'tcx>(
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
/// Try reading an immediate in memory; this is interesting particularly for ScalarPair.
/// Returns `None` if the layout does not permit loading this as a value.
pub(super) fn try_read_immediate_from_mplace(
fn try_read_immediate_from_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, Option<Immediate<M::PointerTag>>> {
@ -324,7 +324,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
/// Note that for a given layout, this operation will either always fail or always
/// succeed! Whether it succeeds depends on whether the layout can be represented
/// in a `Immediate`, not on which data is stored there currently.
pub(crate) fn try_read_immediate(
pub(super) fn try_read_immediate(
&self,
src: OpTy<'tcx, M::PointerTag>,
) -> EvalResult<'tcx, Result<Immediate<M::PointerTag>, MemPlace<M::PointerTag>>> {
@ -509,7 +509,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// Evaluate a place with the goal of reading from it. This lets us sometimes
// avoid allocations.
fn eval_place_to_op(
pub(super) fn eval_place_to_op(
&self,
mir_place: &mir::Place<'tcx>,
layout: Option<TyLayout<'tcx>>,
@ -546,14 +546,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
Move(ref place) =>
self.eval_place_to_op(place, layout)?,
Constant(ref constant) => {
let layout = from_known_layout(layout, || {
let ty = self.monomorphize(mir_op.ty(self.mir(), *self.tcx))?;
self.layout_of(ty)
})?;
let op = self.const_value_to_op(*constant.literal)?;
OpTy { op, layout }
}
Constant(ref constant) => self.eval_lazy_const_to_op(*constant.literal, layout)?,
};
trace!("{:?}: {:?}", mir_op, *op);
Ok(op)
@ -569,38 +562,56 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
.collect()
}
// Used when Miri runs into a constant, and (indirectly through lazy_const_to_op) by CTFE.
fn const_value_to_op(
// Used when Miri runs into a constant, and by const propagation.
crate fn eval_lazy_const_to_op(
&self,
val: ty::LazyConst<'tcx>,
) -> EvalResult<'tcx, Operand<M::PointerTag>> {
trace!("const_value_to_op: {:?}", val);
let val = match val {
layout: Option<TyLayout<'tcx>>,
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
trace!("const_to_op: {:?}", val);
match val {
ty::LazyConst::Unevaluated(def_id, substs) => {
let instance = self.resolve(def_id, substs)?;
return Ok(*OpTy::from(self.const_eval_raw(GlobalId {
return Ok(OpTy::from(self.const_eval_raw(GlobalId {
instance,
promoted: None,
})?));
},
ty::LazyConst::Evaluated(c) => c,
};
match val.val {
ConstValue::ByRef(id, alloc, offset) => {
ty::LazyConst::Evaluated(c) => self.const_to_op(c, layout),
}
}
// Used when the miri-engine runs into a constant and for extracting information from constants
// in patterns via the `const_eval` module
crate fn const_to_op(
&self,
val: ty::Const<'tcx>,
layout: Option<TyLayout<'tcx>>,
) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let layout = from_known_layout(layout, || {
let ty = self.monomorphize(val.ty)?;
self.layout_of(ty)
})?;
let op = match val.val {
ConstValue::ByRef(ptr, alloc) => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen -- and for `static mut`, we copy on demand anyway.
Ok(Operand::Indirect(
MemPlace::from_ptr(Pointer::new(id, offset), alloc.align)
).with_default_tag())
Operand::Indirect(
MemPlace::from_ptr(ptr, alloc.align)
).with_default_tag()
},
ConstValue::Slice(a, b) =>
Ok(Operand::Immediate(Immediate::ScalarPair(
Operand::Immediate(Immediate::ScalarPair(
a.into(),
Scalar::from_uint(b, self.tcx.data_layout.pointer_size).into(),
)).with_default_tag()),
)).with_default_tag(),
ConstValue::Scalar(x) =>
Ok(Operand::Immediate(Immediate::Scalar(x.into())).with_default_tag()),
}
Operand::Immediate(Immediate::Scalar(x.into())).with_default_tag(),
};
Ok(OpTy {
op,
layout,
})
}
/// Read discriminant, return the runtime value as well as the variant index.
@ -697,23 +708,4 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
}
})
}
}
impl<'a, 'mir, 'tcx, M> EvalContext<'a, 'mir, 'tcx, M>
where
M: Machine<'a, 'mir, 'tcx, PointerTag=()>,
// FIXME: Working around https://github.com/rust-lang/rust/issues/24159
M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation<(), M::AllocExtra>)>,
M::AllocExtra: AllocationExtra<(), M::MemoryExtra>,
{
// FIXME: CTFE should use allocations, then we can remove this.
pub(crate) fn lazy_const_to_op(
&self,
cnst: ty::LazyConst<'tcx>,
ty: ty::Ty<'tcx>,
) -> EvalResult<'tcx, OpTy<'tcx>> {
let op = self.const_value_to_op(cnst)?;
Ok(OpTy { op, layout: self.layout_of(ty)? })
}
}


@ -59,7 +59,7 @@ impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> {
}
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Debug)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Tag=()> {
mplace: MemPlace<Tag>,
pub layout: TyLayout<'tcx>,


@ -266,8 +266,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
}
Discriminant(ref place) => {
let place = self.eval_place(place)?;
let discr_val = self.read_discriminant(self.place_to_op(place)?)?.0;
let op = self.eval_place_to_op(place, None)?;
let discr_val = self.read_discriminant(op)?.0;
let size = dest.layout.size;
self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
}


@ -11,7 +11,7 @@ use rustc::mir::interpret::{
};
use super::{
OpTy, Machine, EvalContext, ValueVisitor,
OpTy, Machine, EvalContext, ValueVisitor, MPlaceTy,
};
macro_rules! validation_failure {
@ -74,13 +74,13 @@ pub enum PathElem {
}
/// State for tracking recursive validation of references
pub struct RefTracking<'tcx, Tag> {
pub seen: FxHashSet<(OpTy<'tcx, Tag>)>,
pub todo: Vec<(OpTy<'tcx, Tag>, Vec<PathElem>)>,
pub struct RefTracking<T> {
pub seen: FxHashSet<T>,
pub todo: Vec<(T, Vec<PathElem>)>,
}
impl<'tcx, Tag: Copy+Eq+Hash> RefTracking<'tcx, Tag> {
pub fn new(op: OpTy<'tcx, Tag>) -> Self {
impl<'tcx, T: Copy + Eq + Hash> RefTracking<T> {
pub fn new(op: T) -> Self {
let mut ref_tracking = RefTracking {
seen: FxHashSet::default(),
todo: vec![(op, Vec::new())],
@ -151,7 +151,7 @@ struct ValidityVisitor<'rt, 'a: 'rt, 'mir: 'rt, 'tcx: 'a+'rt+'mir, M: Machine<'a
/// starts must not be changed! `visit_fields` and `visit_array` rely on
/// this stack discipline.
path: Vec<PathElem>,
ref_tracking: Option<&'rt mut RefTracking<'tcx, M::PointerTag>>,
ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>>>,
const_mode: bool,
ecx: &'rt EvalContext<'a, 'mir, 'tcx, M>,
}
@ -401,16 +401,15 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// before. Proceed recursively even for integer pointers, no
// reason to skip them! They are (recursively) valid for some ZST,
// but not for others (e.g., `!` is a ZST).
let op = place.into();
if ref_tracking.seen.insert(op) {
trace!("Recursing below ptr {:#?}", *op);
if ref_tracking.seen.insert(place) {
trace!("Recursing below ptr {:#?}", *place);
// We need to clone the path anyway, make sure it gets created
// with enough space for the additional `Deref`.
let mut new_path = Vec::with_capacity(self.path.len()+1);
new_path.clone_from(&self.path);
new_path.push(PathElem::Deref);
// Remember to come back to this later.
ref_tracking.todo.push((op, new_path));
ref_tracking.todo.push((place, new_path));
}
}
}
@ -600,7 +599,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
&self,
op: OpTy<'tcx, M::PointerTag>,
path: Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>,
ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>>>,
const_mode: bool,
) -> EvalResult<'tcx> {
trace!("validate_operand: {:?}, {:?}", *op, op.layout.ty);

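A standalone sketch of the worklist pattern behind the now type-generic `RefTracking<T>`. It substitutes `std::collections::HashSet` for rustc's `FxHashSet` and plain `String` path elements for `PathElem`, so it is an illustration of the shape of the change, not the interpreter's actual code:

```rust
use std::collections::HashSet;
use std::hash::Hash;

// Mirrors the generic shape in the diff: anything `Copy + Eq + Hash` can be tracked.
struct RefTracking<T> {
    seen: HashSet<T>,
    todo: Vec<(T, Vec<String>)>,
}

impl<T: Copy + Eq + Hash> RefTracking<T> {
    fn new(root: T) -> Self {
        RefTracking { seen: HashSet::new(), todo: vec![(root, Vec::new())] }
    }
}

fn main() {
    // Track "places" by a copyable id; the interpreter uses `MPlaceTy` for `T`.
    let mut rt = RefTracking::new(0u32);
    while let Some((place, path)) = rt.todo.pop() {
        println!("validating place {} at path {:?}", place, path);
        // Pretend each place references one further place; enqueue it only the first time.
        let referenced = place + 1;
        if referenced <= 3 && rt.seen.insert(referenced) {
            let mut new_path = path.clone();
            new_path.push("Deref".to_string());
            rt.todo.push((referenced, new_path));
        }
    }
}
```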

@ -1260,7 +1260,7 @@ fn collect_const<'a, 'tcx>(
ConstValue::Slice(Scalar::Ptr(ptr), _) |
ConstValue::Scalar(Scalar::Ptr(ptr)) =>
collect_miri(tcx, ptr.alloc_id, output),
ConstValue::ByRef(_id, alloc, _offset) => {
ConstValue::ByRef(_ptr, alloc) => {
for &((), id) in alloc.relocations.values() {
collect_miri(tcx, id, output);
}


@ -253,7 +253,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> {
source_info: SourceInfo,
) -> Option<Const<'tcx>> {
self.ecx.tcx.span = source_info.span;
match self.ecx.lazy_const_to_op(*c.literal, c.ty) {
match self.ecx.eval_lazy_const_to_op(*c.literal, None) {
Ok(op) => {
Some((op, c.span))
},


@ -1461,7 +1461,7 @@ fn maybe_check_static_with_link_section(tcx: TyCtxt, id: DefId, span: Span) {
};
let param_env = ty::ParamEnv::reveal_all();
if let Ok(static_) = tcx.const_eval(param_env.and(cid)) {
let alloc = if let ConstValue::ByRef(_, allocation, _) = static_.val {
let alloc = if let ConstValue::ByRef(_, allocation) = static_.val {
allocation
} else {
bug!("Matching on non-ByRef static")


@ -976,11 +976,13 @@ impl Attributes {
"https://doc.rust-lang.org/nightly",
};
// This is a primitive so the url is done "by hand".
let tail = fragment.find('#').unwrap_or_else(|| fragment.len());
Some((s.clone(),
format!("{}{}std/primitive.{}.html",
format!("{}{}std/primitive.{}.html{}",
url,
if !url.ends_with('/') { "/" } else { "" },
fragment)))
&fragment[..tail],
&fragment[tail..])))
} else {
panic!("This isn't a primitive?!");
}

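A standalone sketch of the new URL construction: everything after `#` is kept as an HTML fragment so it lands after `.html`. The helper name `primitive_url` and the example fragment values are illustrative, not rustdoc's actual API:

```rust
fn primitive_url(url: &str, fragment: &str) -> String {
    // Split "char#method.len_utf8" into the primitive name and its fragment.
    let tail = fragment.find('#').unwrap_or_else(|| fragment.len());
    format!("{}{}std/primitive.{}.html{}",
            url,
            if !url.ends_with('/') { "/" } else { "" },
            &fragment[..tail],
            &fragment[tail..])
}

fn main() {
    assert_eq!(
        primitive_url("https://doc.rust-lang.org/nightly", "char#method.len_utf8"),
        "https://doc.rust-lang.org/nightly/std/primitive.char.html#method.len_utf8"
    );
    // Without a fragment the behaviour is unchanged.
    assert_eq!(
        primitive_url("https://doc.rust-lang.org/nightly/", "str"),
        "https://doc.rust-lang.org/nightly/std/primitive.str.html"
    );
}
```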

@ -1,6 +1,7 @@
use rustc::lint as lint;
use rustc::hir;
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use rustc::ty;
use syntax;
use syntax::ast::{self, Ident, NodeId};
@ -126,6 +127,17 @@ impl<'a, 'tcx, 'rcx> LinkCollector<'a, 'tcx, 'rcx> {
path = name.clone();
}
}
if let Some(prim) = is_primitive(&path, false) {
let did = primitive_impl(cx, &path).ok_or(())?;
return cx.tcx.associated_items(did)
.find(|item| item.ident.name == item_name)
.and_then(|item| match item.kind {
ty::AssociatedKind::Method => Some("method"),
_ => None,
})
.map(|out| (prim, Some(format!("{}#{}.{}", path, out, item_name))))
.ok_or(());
}
// FIXME: `with_scope` requires the `NodeId` of a module.
let ty = cx.resolver.borrow_mut()
@ -589,3 +601,26 @@ fn is_primitive(path_str: &str, is_val: bool) -> Option<Def> {
PRIMITIVES.iter().find(|x| x.0 == path_str).map(|x| x.1)
}
}
fn primitive_impl(cx: &DocContext<'_, '_, '_>, path_str: &str) -> Option<DefId> {
let tcx = cx.tcx;
match path_str {
"u8" => tcx.lang_items().u8_impl(),
"u16" => tcx.lang_items().u16_impl(),
"u32" => tcx.lang_items().u32_impl(),
"u64" => tcx.lang_items().u64_impl(),
"u128" => tcx.lang_items().u128_impl(),
"usize" => tcx.lang_items().usize_impl(),
"i8" => tcx.lang_items().i8_impl(),
"i16" => tcx.lang_items().i16_impl(),
"i32" => tcx.lang_items().i32_impl(),
"i64" => tcx.lang_items().i64_impl(),
"i128" => tcx.lang_items().i128_impl(),
"isize" => tcx.lang_items().isize_impl(),
"f32" => tcx.lang_items().f32_impl(),
"f64" => tcx.lang_items().f64_impl(),
"str" => tcx.lang_items().str_impl(),
"char" => tcx.lang_items().char_impl(),
_ => None,
}
}


@ -9,8 +9,10 @@ impl Stdin {
pub fn new() -> io::Result<Stdin> {
Ok(Stdin(()))
}
}
pub fn read(&self, _: &mut [u8]) -> io::Result<usize> {
impl io::Read for Stdin {
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> {
Ok(0)
}
}
@ -19,15 +21,17 @@ impl Stdout {
pub fn new() -> io::Result<Stdout> {
Ok(Stdout(()))
}
}
pub fn write(&self, _: &[u8]) -> io::Result<usize> {
impl io::Write for Stdout {
fn write(&mut self, _buf: &[u8]) -> io::Result<usize> {
Err(io::Error::new(
io::ErrorKind::BrokenPipe,
"Stdout is not connected to any output in this environment",
))
}
pub fn flush(&self) -> io::Result<()> {
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
@ -36,29 +40,18 @@ impl Stderr {
pub fn new() -> io::Result<Stderr> {
Ok(Stderr(()))
}
}
pub fn write(&self, _: &[u8]) -> io::Result<usize> {
impl io::Write for Stderr {
fn write(&mut self, _buf: &[u8]) -> io::Result<usize> {
Err(io::Error::new(
io::ErrorKind::BrokenPipe,
"Stderr is not connected to any output in this environment",
))
}
pub fn flush(&self) -> io::Result<()> {
Ok(())
}
}
// FIXME: right now this raw stderr handle is used in a few places because
// std::io::stderr_raw isn't exposed, but once that's exposed this impl
// should go away
impl io::Write for Stderr {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Stderr::write(self, data)
}
fn flush(&mut self) -> io::Result<()> {
Stderr::flush(self)
Ok(())
}
}


@ -8,10 +8,12 @@ pub struct Stderr(());
impl Stdin {
pub fn new() -> io::Result<Stdin> { Ok(Stdin(())) }
}
pub fn read(&self, data: &mut [u8]) -> io::Result<usize> {
impl io::Read for Stdin {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let fd = FileDesc::new(0);
let ret = fd.read(data);
let ret = fd.read(buf);
fd.into_raw();
ret
}
@ -19,44 +21,35 @@ impl Stdin {
impl Stdout {
pub fn new() -> io::Result<Stdout> { Ok(Stdout(())) }
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
impl io::Write for Stdout {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let fd = FileDesc::new(1);
let ret = fd.write(data);
let ret = fd.write(buf);
fd.into_raw();
ret
}
pub fn flush(&self) -> io::Result<()> {
fn flush(&mut self) -> io::Result<()> {
cvt(syscall::fsync(1)).and(Ok(()))
}
}
impl Stderr {
pub fn new() -> io::Result<Stderr> { Ok(Stderr(())) }
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
impl io::Write for Stderr {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let fd = FileDesc::new(2);
let ret = fd.write(data);
let ret = fd.write(buf);
fd.into_raw();
ret
}
pub fn flush(&self) -> io::Result<()> {
cvt(syscall::fsync(2)).and(Ok(()))
}
}
// FIXME: right now this raw stderr handle is used in a few places because
// std::io::stderr_raw isn't exposed, but once that's exposed this impl
// should go away
impl io::Write for Stderr {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Stderr::write(self, data)
}
fn flush(&mut self) -> io::Result<()> {
Stderr::flush(self)
cvt(syscall::fsync(2)).and(Ok(()))
}
}


@ -16,46 +16,39 @@ fn with_std_fd<F: FnOnce(&FileDesc) -> R, R>(fd: abi::Fd, f: F) -> R {
impl Stdin {
pub fn new() -> io::Result<Stdin> { Ok(Stdin(())) }
}
pub fn read(&self, data: &mut [u8]) -> io::Result<usize> {
with_std_fd(abi::FD_STDIN, |fd| fd.read(data))
impl io::Read for Stdin {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
with_std_fd(abi::FD_STDIN, |fd| fd.read(buf))
}
}
impl Stdout {
pub fn new() -> io::Result<Stdout> { Ok(Stdout(())) }
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
with_std_fd(abi::FD_STDOUT, |fd| fd.write(data))
impl io::Write for Stdout {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
with_std_fd(abi::FD_STDOUT, |fd| fd.write(buf))
}
pub fn flush(&self) -> io::Result<()> {
fn flush(&mut self) -> io::Result<()> {
with_std_fd(abi::FD_STDOUT, |fd| fd.flush())
}
}
impl Stderr {
pub fn new() -> io::Result<Stderr> { Ok(Stderr(())) }
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
with_std_fd(abi::FD_STDERR, |fd| fd.write(data))
}
pub fn flush(&self) -> io::Result<()> {
with_std_fd(abi::FD_STDERR, |fd| fd.flush())
}
}
// FIXME: right now this raw stderr handle is used in a few places because
// std::io::stderr_raw isn't exposed, but once that's exposed this impl
// should go away
impl io::Write for Stderr {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Stderr::write(self, data)
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
with_std_fd(abi::FD_STDERR, |fd| fd.write(buf))
}
fn flush(&mut self) -> io::Result<()> {
Stderr::flush(self)
with_std_fd(abi::FD_STDERR, |fd| fd.flush())
}
}


@ -8,10 +8,12 @@ pub struct Stderr(());
impl Stdin {
pub fn new() -> io::Result<Stdin> { Ok(Stdin(())) }
}
pub fn read(&self, data: &mut [u8]) -> io::Result<usize> {
impl io::Read for Stdin {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let fd = FileDesc::new(libc::STDIN_FILENO);
let ret = fd.read(data);
let ret = fd.read(buf);
fd.into_raw(); // do not close this FD
ret
}
@ -19,44 +21,35 @@ impl Stdin {
impl Stdout {
pub fn new() -> io::Result<Stdout> { Ok(Stdout(())) }
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
impl io::Write for Stdout {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let fd = FileDesc::new(libc::STDOUT_FILENO);
let ret = fd.write(data);
let ret = fd.write(buf);
fd.into_raw(); // do not close this FD
ret
}
pub fn flush(&self) -> io::Result<()> {
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl Stderr {
pub fn new() -> io::Result<Stderr> { Ok(Stderr(())) }
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
impl io::Write for Stderr {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let fd = FileDesc::new(libc::STDERR_FILENO);
let ret = fd.write(data);
let ret = fd.write(buf);
fd.into_raw(); // do not close this FD
ret
}
pub fn flush(&self) -> io::Result<()> {
Ok(())
}
}
// FIXME: right now this raw stderr handle is used in a few places because
// std::io::stderr_raw isn't exposed, but once that's exposed this impl
// should go away
impl io::Write for Stderr {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Stderr::write(self, data)
}
fn flush(&mut self) -> io::Result<()> {
Stderr::flush(self)
Ok(())
}
}


@ -18,7 +18,7 @@
use libc;
use ffi::CString;
use ffi::CStr;
use marker;
use mem;
use sync::atomic::{AtomicUsize, Ordering};
@ -26,7 +26,7 @@ use sync::atomic::{AtomicUsize, Ordering};
macro_rules! weak {
(fn $name:ident($($t:ty),*) -> $ret:ty) => (
static $name: ::sys::weak::Weak<unsafe extern fn($($t),*) -> $ret> =
::sys::weak::Weak::new(stringify!($name));
::sys::weak::Weak::new(concat!(stringify!($name), '\0'));
)
}
@ -45,23 +45,22 @@ impl<F> Weak<F> {
}
}
pub fn get(&self) -> Option<&F> {
pub fn get(&self) -> Option<F> {
assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
unsafe {
if self.addr.load(Ordering::SeqCst) == 1 {
self.addr.store(fetch(self.name), Ordering::SeqCst);
}
if self.addr.load(Ordering::SeqCst) == 0 {
None
} else {
mem::transmute::<&AtomicUsize, Option<&F>>(&self.addr)
match self.addr.load(Ordering::SeqCst) {
0 => None,
addr => Some(mem::transmute_copy::<usize, F>(&addr)),
}
}
}
}
unsafe fn fetch(name: &str) -> usize {
let name = match CString::new(name) {
let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
Ok(cstr) => cstr,
Err(..) => return 0,
};

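A sketch of the idea behind appending `'\0'` in the `weak!` macro: `CStr::from_bytes_with_nul` can borrow a nul-terminated string literal directly, where the old `CString::new` path had to allocate at lookup time. The `weak_name!` macro and the `getrandom` symbol below are only illustrative:

```rust
use std::ffi::CStr;

// Hypothetical stand-in for the relevant part of the real `weak!` macro:
// build a nul-terminated symbol name at compile time.
macro_rules! weak_name {
    ($name:ident) => { concat!(stringify!($name), '\0') };
}

fn main() {
    let name: &str = weak_name!(getrandom);
    assert_eq!(name, "getrandom\0");
    // Borrows the bytes directly; no allocation, but exactly one trailing nul is required.
    let cstr = CStr::from_bytes_with_nul(name.as_bytes()).expect("exactly one trailing nul");
    assert_eq!(cstr.to_str().unwrap(), "getrandom");
}
```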

@ -9,9 +9,11 @@ impl Stdin {
pub fn new() -> io::Result<Stdin> {
Ok(Stdin)
}
}
pub fn read(&self, data: &mut [u8]) -> io::Result<usize> {
Ok(ReadSysCall::perform(0, data))
impl io::Read for Stdin {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
Ok(ReadSysCall::perform(0, buf))
}
}
@ -19,13 +21,15 @@ impl Stdout {
pub fn new() -> io::Result<Stdout> {
Ok(Stdout)
}
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
WriteSysCall::perform(1, data);
Ok(data.len())
impl io::Write for Stdout {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
WriteSysCall::perform(1, buf);
Ok(buf.len())
}
pub fn flush(&self) -> io::Result<()> {
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
@ -34,23 +38,16 @@ impl Stderr {
pub fn new() -> io::Result<Stderr> {
Ok(Stderr)
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
WriteSysCall::perform(2, data);
Ok(data.len())
}
pub fn flush(&self) -> io::Result<()> {
Ok(())
}
}
impl io::Write for Stderr {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
(&*self).write(data)
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
WriteSysCall::perform(2, buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
(&*self).flush()
Ok(())
}
}


@ -252,9 +252,9 @@ impl Stdio {
// should still be unavailable so propagate the
// INVALID_HANDLE_VALUE.
Stdio::Inherit => {
match stdio::get(stdio_id) {
match stdio::get_handle(stdio_id) {
Ok(io) => {
let io = Handle::new(io.handle());
let io = Handle::new(io);
let ret = io.duplicate(0, true,
c::DUPLICATE_SAME_ACCESS);
io.into_raw();


@ -1,152 +1,259 @@
#![unstable(issue = "0", feature = "windows_stdio")]
use io::prelude::*;
use char::decode_utf16;
use cmp;
use io::{self, Cursor};
use io;
use ptr;
use str;
use sync::Mutex;
use sys::c;
use sys::cvt;
use sys::handle::Handle;
pub enum Output {
Console(c::HANDLE),
Pipe(c::HANDLE),
}
// Don't cache handles but get them fresh for every read/write. This allows us to track changes to
// the value over time (such as if a process calls `SetStdHandle` while it's running). See #40490.
pub struct Stdin {
utf8: Mutex<io::Cursor<Vec<u8>>>,
surrogate: u16,
}
pub struct Stdout;
pub struct Stderr;
pub fn get(handle: c::DWORD) -> io::Result<Output> {
let handle = unsafe { c::GetStdHandle(handle) };
// Apparently Windows doesn't handle large reads on stdin or writes to stdout/stderr well (see
// #13304 for details).
//
// From MSDN (2011): "The storage for this buffer is allocated from a shared heap for the
// process that is 64 KB in size. The maximum size of the buffer will depend on heap usage."
//
// We choose the cap at 8 KiB because libuv does the same, and it seems to be acceptable so far.
const MAX_BUFFER_SIZE: usize = 8192;
// The standard buffer size of BufReader for Stdin should be able to hold 3x more bytes than there
// are `u16`'s in MAX_BUFFER_SIZE. This ensures the read data can always be completely decoded from
// UTF-16 to UTF-8.
pub const STDIN_BUF_SIZE: usize = MAX_BUFFER_SIZE / 2 * 3;
pub fn get_handle(handle_id: c::DWORD) -> io::Result<c::HANDLE> {
let handle = unsafe { c::GetStdHandle(handle_id) };
if handle == c::INVALID_HANDLE_VALUE {
Err(io::Error::last_os_error())
} else if handle.is_null() {
Err(io::Error::from_raw_os_error(c::ERROR_INVALID_HANDLE as i32))
} else {
let mut out = 0;
match unsafe { c::GetConsoleMode(handle, &mut out) } {
0 => Ok(Output::Pipe(handle)),
_ => Ok(Output::Console(handle)),
}
Ok(handle)
}
}
fn write(handle: c::DWORD, data: &[u8]) -> io::Result<usize> {
let handle = match get(handle)? {
Output::Console(c) => c,
Output::Pipe(p) => {
let handle = Handle::new(p);
let ret = handle.write(data);
handle.into_raw();
return ret
}
};
fn is_console(handle: c::HANDLE) -> bool {
// `GetConsoleMode` will return false (0) if this is a pipe (we don't care about the reported
// mode). This will only detect Windows Console, not other terminals connected to a pipe like
// MSYS. Which is exactly what we need, as only Windows Console needs a conversion to UTF-16.
let mut mode = 0;
unsafe { c::GetConsoleMode(handle, &mut mode) != 0 }
}
// As with stdin on windows, stdout often can't handle writes of large
// sizes. For an example, see #14940. For this reason, don't try to
// write the entire output buffer on windows.
fn write(handle_id: c::DWORD, data: &[u8]) -> io::Result<usize> {
let handle = get_handle(handle_id)?;
if !is_console(handle) {
let handle = Handle::new(handle);
let ret = handle.write(data);
handle.into_raw(); // Don't close the handle
return ret;
}
// As the console is meant for presenting text, we assume bytes of `data` come from a string
// and are encoded as UTF-8, which needs to be encoded as UTF-16.
//
// For some other references, it appears that this problem has been
// encountered by others [1] [2]. We choose the number 8K just because
// libuv does the same.
//
// [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
// [2]: http://www.mail-archive.com/log4net-dev@logging.apache.org/msg00661.html
const OUT_MAX: usize = 8192;
let len = cmp::min(data.len(), OUT_MAX);
// If the data is not valid UTF-8 we write out as many bytes as are valid.
// Only when there are no valid bytes (which will happen on the next call), return an error.
let len = cmp::min(data.len(), MAX_BUFFER_SIZE / 2);
let utf8 = match str::from_utf8(&data[..len]) {
Ok(s) => s,
Err(ref e) if e.valid_up_to() == 0 => return Err(invalid_encoding()),
Err(ref e) if e.valid_up_to() == 0 => {
return Err(io::Error::new(io::ErrorKind::InvalidData,
"Windows stdio in console mode does not support writing non-UTF-8 byte sequences"))
},
Err(e) => str::from_utf8(&data[..e.valid_up_to()]).unwrap(),
};
let utf16 = utf8.encode_utf16().collect::<Vec<u16>>();
let mut utf16 = [0u16; MAX_BUFFER_SIZE / 2];
let mut len_utf16 = 0;
for (chr, dest) in utf8.encode_utf16().zip(utf16.iter_mut()) {
*dest = chr;
len_utf16 += 1;
}
let utf16 = &utf16[..len_utf16];
let mut written = write_u16s(handle, &utf16)?;
// Figure out how many bytes of as UTF-8 were written away as UTF-16.
if written == utf16.len() {
Ok(utf8.len())
} else {
// Make sure we didn't end up writing only half of a surrogate pair (even though the chance
// is tiny). Because it is not possible for user code to re-slice `data` in such a way that
// a missing surrogate can be produced (and also because of the UTF-8 validation above),
// write the missing surrogate out now.
// Buffering it would mean we have to lie about the number of bytes written.
let first_char_remaining = utf16[written];
if first_char_remaining >= 0xDCEE && first_char_remaining <= 0xDFFF { // low surrogate
// We just hope this works, and give up otherwise
let _ = write_u16s(handle, &utf16[written..written+1]);
written += 1;
}
// Calculate the number of bytes of `utf8` that were actually written.
let mut count = 0;
for ch in utf16[..written].iter() {
count += match ch {
0x0000 ..= 0x007F => 1,
0x0080 ..= 0x07FF => 2,
0xDCEE ..= 0xDFFF => 1, // Low surrogate. We already counted 3 bytes for the other.
_ => 3,
};
}
debug_assert!(String::from_utf16(&utf16[..written]).unwrap() == utf8[..count]);
Ok(count)
}
}
fn write_u16s(handle: c::HANDLE, data: &[u16]) -> io::Result<usize> {
let mut written = 0;
cvt(unsafe {
c::WriteConsoleW(handle,
utf16.as_ptr() as c::LPCVOID,
utf16.len() as u32,
data.as_ptr() as c::LPCVOID,
data.len() as u32,
&mut written,
ptr::null_mut())
})?;
// FIXME if this only partially writes the utf16 buffer then we need to
// figure out how many bytes of `data` were actually written
assert_eq!(written as usize, utf16.len());
Ok(utf8.len())
Ok(written as usize)
}
impl Stdin {
pub fn new() -> io::Result<Stdin> {
Ok(Stdin {
utf8: Mutex::new(Cursor::new(Vec::new())),
})
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
let handle = match get(c::STD_INPUT_HANDLE)? {
Output::Console(c) => c,
Output::Pipe(p) => {
let handle = Handle::new(p);
let ret = handle.read(buf);
handle.into_raw();
return ret
}
};
let mut utf8 = self.utf8.lock().unwrap();
// Read more if the buffer is empty
if utf8.position() as usize == utf8.get_ref().len() {
let mut utf16 = vec![0u16; 0x1000];
let mut num = 0;
let mut input_control = readconsole_input_control(CTRL_Z_MASK);
cvt(unsafe {
c::ReadConsoleW(handle,
utf16.as_mut_ptr() as c::LPVOID,
utf16.len() as u32,
&mut num,
&mut input_control as c::PCONSOLE_READCONSOLE_CONTROL)
})?;
utf16.truncate(num as usize);
// FIXME: what to do about this data that has already been read?
let mut data = match String::from_utf16(&utf16) {
Ok(utf8) => utf8.into_bytes(),
Err(..) => return Err(invalid_encoding()),
};
if let Some(&last_byte) = data.last() {
if last_byte == CTRL_Z {
data.pop();
}
}
*utf8 = Cursor::new(data);
}
// MemReader shouldn't error here since we just filled it
utf8.read(buf)
Ok(Stdin { surrogate: 0 })
}
}
#[unstable(reason = "not public", issue = "0", feature = "fd_read")]
impl<'a> Read for &'a Stdin {
impl io::Read for Stdin {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
(**self).read(buf)
let handle = get_handle(c::STD_INPUT_HANDLE)?;
if !is_console(handle) {
let handle = Handle::new(handle);
let ret = handle.read(buf);
handle.into_raw(); // Don't close the handle
return ret;
}
if buf.len() == 0 {
return Ok(0);
} else if buf.len() < 4 {
return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Windows stdin in console mode does not support a buffer too small to \
guarantee holding one arbitrary UTF-8 character (4 bytes)"))
}
let mut utf16_buf = [0u16; MAX_BUFFER_SIZE / 2];
// In the worst case, an UTF-8 string can take 3 bytes for every `u16` of an UTF-16. So
// we can read at most a third of `buf.len()` chars and uphold the guarantee no data gets
// lost.
let amount = cmp::min(buf.len() / 3, utf16_buf.len());
let read = read_u16s_fixup_surrogates(handle, &mut utf16_buf, amount, &mut self.surrogate)?;
utf16_to_utf8(&utf16_buf[..read], buf)
}
}
// We assume that if the last `u16` is an unpaired surrogate they got sliced apart by our
// buffer size, and keep it around for the next read hoping to put them together.
// This is a best effort, and may not work if we are not the only reader on Stdin.
fn read_u16s_fixup_surrogates(handle: c::HANDLE,
buf: &mut [u16],
mut amount: usize,
surrogate: &mut u16) -> io::Result<usize>
{
// Insert possibly remaining unpaired surrogate from last read.
let mut start = 0;
if *surrogate != 0 {
buf[0] = *surrogate;
*surrogate = 0;
start = 1;
if amount == 1 {
// Special case: `Stdin::read` guarantees we can always read at least one new `u16`
// and combine it with an unpaired surrogate, because the UTF-8 buffer is at least
// 4 bytes.
amount = 2;
}
}
let mut amount = read_u16s(handle, &mut buf[start..amount])? + start;
if amount > 0 {
let last_char = buf[amount - 1];
if last_char >= 0xD800 && last_char <= 0xDBFF { // high surrogate
*surrogate = last_char;
amount -= 1;
}
}
Ok(amount)
}
fn read_u16s(handle: c::HANDLE, buf: &mut [u16]) -> io::Result<usize> {
// Configure the `pInputControl` parameter to not only return on `\r\n` but also Ctrl-Z, the
// traditional DOS method to indicate end of character stream / user input (SUB).
// See #38274 and https://stackoverflow.com/questions/43836040/win-api-readconsole.
const CTRL_Z: u16 = 0x1A;
const CTRL_Z_MASK: c::ULONG = 1 << CTRL_Z;
let mut input_control = c::CONSOLE_READCONSOLE_CONTROL {
nLength: ::mem::size_of::<c::CONSOLE_READCONSOLE_CONTROL>() as c::ULONG,
nInitialChars: 0,
dwCtrlWakeupMask: CTRL_Z_MASK,
dwControlKeyState: 0,
};
let mut amount = 0;
cvt(unsafe {
c::ReadConsoleW(handle,
buf.as_mut_ptr() as c::LPVOID,
buf.len() as u32,
&mut amount,
&mut input_control as c::PCONSOLE_READCONSOLE_CONTROL)
})?;
if amount > 0 && buf[amount as usize - 1] == CTRL_Z {
amount -= 1;
}
Ok(amount as usize)
}
#[allow(unused)]
fn utf16_to_utf8(utf16: &[u16], utf8: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
for chr in decode_utf16(utf16.iter().cloned()) {
match chr {
Ok(chr) => {
chr.encode_utf8(&mut utf8[written..]);
written += chr.len_utf8();
}
Err(_) => {
// We can't really do any better than forget all data and return an error.
return Err(io::Error::new(io::ErrorKind::InvalidData,
"Windows stdin in console mode does not support non-UTF-16 input; \
encountered unpaired surrogate"))
}
}
}
Ok(written)
}
impl Stdout {
pub fn new() -> io::Result<Stdout> {
Ok(Stdout)
}
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
write(c::STD_OUTPUT_HANDLE, data)
impl io::Write for Stdout {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
write(c::STD_OUTPUT_HANDLE, buf)
}
pub fn flush(&self) -> io::Result<()> {
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
@ -155,66 +262,22 @@ impl Stderr {
pub fn new() -> io::Result<Stderr> {
Ok(Stderr)
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
write(c::STD_ERROR_HANDLE, data)
}
pub fn flush(&self) -> io::Result<()> {
Ok(())
}
}
// FIXME: right now this raw stderr handle is used in a few places because
// std::io::stderr_raw isn't exposed, but once that's exposed this impl
// should go away
impl io::Write for Stderr {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Stderr::write(self, data)
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
write(c::STD_ERROR_HANDLE, buf)
}
fn flush(&mut self) -> io::Result<()> {
Stderr::flush(self)
Ok(())
}
}
impl Output {
pub fn handle(&self) -> c::HANDLE {
match *self {
Output::Console(c) => c,
Output::Pipe(c) => c,
}
}
}
fn invalid_encoding() -> io::Error {
io::Error::new(io::ErrorKind::InvalidData,
"Windows stdio in console mode does not support non-UTF-8 byte sequences; \
see https://github.com/rust-lang/rust/issues/23344")
}
fn readconsole_input_control(wakeup_mask: c::ULONG) -> c::CONSOLE_READCONSOLE_CONTROL {
c::CONSOLE_READCONSOLE_CONTROL {
nLength: ::mem::size_of::<c::CONSOLE_READCONSOLE_CONTROL>() as c::ULONG,
nInitialChars: 0,
dwCtrlWakeupMask: wakeup_mask,
dwControlKeyState: 0,
}
}
const CTRL_Z: u8 = 0x1A;
const CTRL_Z_MASK: c::ULONG = 0x4000000; //1 << 0x1A
pub fn is_ebadf(err: &io::Error) -> bool {
err.raw_os_error() == Some(c::ERROR_INVALID_HANDLE as i32)
}
// The default buffer capacity is 64k, but apparently windows
// doesn't like 64k reads on stdin. See #13304 for details, but the
// idea is that on windows we use a slightly smaller buffer that's
// been seen to be acceptable.
pub const STDIN_BUF_SIZE: usize = 8 * 1024;
pub fn panic_output() -> Option<impl io::Write> {
Stderr::new().ok()
}

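A standalone sketch of the console write path's UTF-8 handling shown above: clamp to the buffer budget, keep only the valid UTF-8 prefix (failing only when no bytes at all are valid), then re-encode that prefix as UTF-16. The helper name is illustrative and the actual `WriteConsoleW` call is omitted:

```rust
use std::cmp;
use std::str;

const MAX_BUFFER_SIZE: usize = 8192;

fn utf8_prefix_as_utf16(data: &[u8]) -> Result<Vec<u16>, &'static str> {
    // Stay under the shared console heap limit described in the comments above.
    let len = cmp::min(data.len(), MAX_BUFFER_SIZE / 2);
    let utf8 = match str::from_utf8(&data[..len]) {
        Ok(s) => s,
        Err(ref e) if e.valid_up_to() == 0 => return Err("not UTF-8"),
        // Write out only as many bytes as are valid UTF-8.
        Err(e) => str::from_utf8(&data[..e.valid_up_to()]).unwrap(),
    };
    Ok(utf8.encode_utf16().collect())
}

fn main() {
    // "héllo" followed by a stray 0xFF byte: only the valid prefix is converted.
    let mut bytes = "héllo".as_bytes().to_vec();
    bytes.push(0xFF);
    let utf16 = utf8_prefix_as_utf16(&bytes).unwrap();
    assert_eq!(String::from_utf16(&utf16).unwrap(), "héllo");
    // A buffer that starts with an invalid byte is rejected outright.
    assert!(utf8_prefix_as_utf16(&[0xFF, b'a']).is_err());
}
```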

@ -1733,7 +1733,7 @@ impl<'a> Parser<'a> {
}
} else if self.eat_keyword(keywords::Impl) {
// Always parse bounds greedily for better error recovery.
let bounds = self.parse_generic_bounds()?;
let bounds = self.parse_generic_bounds(None)?;
impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)
} else if self.check_keyword(keywords::Dyn) &&
@ -1742,13 +1742,13 @@ impl<'a> Parser<'a> {
!can_continue_type_after_non_fn_ident(t))) {
self.bump(); // `dyn`
// Always parse bounds greedily for better error recovery.
let bounds = self.parse_generic_bounds()?;
let bounds = self.parse_generic_bounds(None)?;
impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn)
} else if self.check(&token::Question) ||
self.check_lifetime() && self.look_ahead(1, |t| t.is_like_plus()) {
// Bound list (trait object type)
TyKind::TraitObject(self.parse_generic_bounds_common(allow_plus)?,
TyKind::TraitObject(self.parse_generic_bounds_common(allow_plus, None)?,
TraitObjectSyntax::None)
} else if self.eat_lt() {
// Qualified path
@ -1794,7 +1794,7 @@ impl<'a> Parser<'a> {
let mut bounds = vec![GenericBound::Trait(poly_trait_ref, TraitBoundModifier::None)];
if parse_plus {
self.eat_plus(); // `+`, or `+=` gets split and `+` is discarded
bounds.append(&mut self.parse_generic_bounds()?);
bounds.append(&mut self.parse_generic_bounds(None)?);
}
Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None))
}
@ -1819,7 +1819,7 @@ impl<'a> Parser<'a> {
}
self.bump(); // `+`
let bounds = self.parse_generic_bounds()?;
let bounds = self.parse_generic_bounds(None)?;
let sum_span = ty.span.to(self.prev_span);
let mut err = struct_span_err!(self.sess.span_diagnostic, sum_span, E0178,
@ -5496,11 +5496,16 @@ impl<'a> Parser<'a> {
/// TY_BOUND = TY_BOUND_NOPAREN | (TY_BOUND_NOPAREN)
/// TY_BOUND_NOPAREN = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g., `?for<'a: 'b> m::Trait<'a>`)
/// ```
fn parse_generic_bounds_common(&mut self, allow_plus: bool) -> PResult<'a, GenericBounds> {
fn parse_generic_bounds_common(&mut self,
allow_plus: bool,
colon_span: Option<Span>) -> PResult<'a, GenericBounds> {
let mut bounds = Vec::new();
let mut negative_bounds = Vec::new();
let mut last_plus_span = None;
loop {
// This needs to be synchronized with `Token::can_begin_bound`.
let is_bound_start = self.check_path() || self.check_lifetime() ||
self.check(&token::Not) || // used for error reporting only
self.check(&token::Question) ||
self.check_keyword(keywords::For) ||
self.check(&token::OpenDelim(token::Paren));
@ -5508,6 +5513,7 @@ impl<'a> Parser<'a> {
let lo = self.span;
let has_parens = self.eat(&token::OpenDelim(token::Paren));
let inner_lo = self.span;
let is_negative = self.eat(&token::Not);
let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
if self.token.is_lifetime() {
if let Some(question_span) = question {
@ -5538,13 +5544,20 @@ impl<'a> Parser<'a> {
if has_parens {
self.expect(&token::CloseDelim(token::Paren))?;
}
let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
let modifier = if question.is_some() {
TraitBoundModifier::Maybe
let poly_span = lo.to(self.prev_span);
if is_negative {
negative_bounds.push(
last_plus_span.or(colon_span).unwrap()
.to(poly_span));
} else {
TraitBoundModifier::None
};
bounds.push(GenericBound::Trait(poly_trait, modifier));
let poly_trait = PolyTraitRef::new(lifetime_defs, path, poly_span);
let modifier = if question.is_some() {
TraitBoundModifier::Maybe
} else {
TraitBoundModifier::None
};
bounds.push(GenericBound::Trait(poly_trait, modifier));
}
}
} else {
break
@ -5552,14 +5565,39 @@ impl<'a> Parser<'a> {
if !allow_plus || !self.eat_plus() {
break
} else {
last_plus_span = Some(self.prev_span);
}
}
if !negative_bounds.is_empty() {
let plural = negative_bounds.len() > 1;
let mut err = self.struct_span_err(negative_bounds,
"negative trait bounds are not supported");
let bound_list = colon_span.unwrap().to(self.prev_span);
let mut new_bound_list = String::new();
if !bounds.is_empty() {
let mut snippets = bounds.iter().map(|bound| bound.span())
.map(|span| self.sess.source_map().span_to_snippet(span));
while let Some(Ok(snippet)) = snippets.next() {
new_bound_list.push_str(" + ");
new_bound_list.push_str(&snippet);
}
new_bound_list = new_bound_list.replacen(" +", ":", 1);
}
err.span_suggestion_short(bound_list,
&format!("remove the trait bound{}",
if plural { "s" } else { "" }),
new_bound_list,
Applicability::MachineApplicable);
err.emit();
}
return Ok(bounds);
}
fn parse_generic_bounds(&mut self) -> PResult<'a, GenericBounds> {
self.parse_generic_bounds_common(true)
fn parse_generic_bounds(&mut self, colon_span: Option<Span>) -> PResult<'a, GenericBounds> {
self.parse_generic_bounds_common(true, colon_span)
}
/// Parses bounds of a lifetime parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
@ -5587,7 +5625,7 @@ impl<'a> Parser<'a> {
// Parse optional colon and param bounds.
let bounds = if self.eat(&token::Colon) {
self.parse_generic_bounds()?
self.parse_generic_bounds(None)?
} else {
Vec::new()
};
@ -5619,7 +5657,7 @@ impl<'a> Parser<'a> {
// Parse optional colon and param bounds.
let bounds = if self.eat(&token::Colon) {
self.parse_generic_bounds()?
self.parse_generic_bounds(None)?
} else {
Vec::new()
};
@ -6032,7 +6070,7 @@ impl<'a> Parser<'a> {
// or with mandatory equality sign and the second type.
let ty = self.parse_ty()?;
if self.eat(&token::Colon) {
let bounds = self.parse_generic_bounds()?;
let bounds = self.parse_generic_bounds(None)?;
where_clause.predicates.push(ast::WherePredicate::BoundPredicate(
ast::WhereBoundPredicate {
span: lo.to(self.prev_span),
@ -6546,14 +6584,14 @@ impl<'a> Parser<'a> {
// Parse optional colon and supertrait bounds.
let bounds = if self.eat(&token::Colon) {
self.parse_generic_bounds()?
self.parse_generic_bounds(Some(self.prev_span))?
} else {
Vec::new()
};
if self.eat(&token::Eq) {
// it's a trait alias
let bounds = self.parse_generic_bounds()?;
let bounds = self.parse_generic_bounds(None)?;
tps.where_clause = self.parse_where_clause()?;
self.expect(&token::Semi)?;
if is_auto == IsAuto::Yes {
@ -7588,7 +7626,7 @@ impl<'a> Parser<'a> {
tps.where_clause = self.parse_where_clause()?;
let alias = if existential {
self.expect(&token::Colon)?;
let bounds = self.parse_generic_bounds()?;
let bounds = self.parse_generic_bounds(None)?;
AliasKind::Existential(bounds)
} else {
self.expect(&token::Eq)?;

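A standalone sketch of how the "remove the trait bound" suggestion text is assembled from the surviving (non-negative) bounds; the helper name is illustrative:

```rust
// Join the kept bounds with " + ", then turn the leading " +" back into the ":"
// that introduces a supertrait list, as the parser change above does with `replacen`.
fn suggested_bound_list(kept_bounds: &[&str]) -> String {
    let mut new_bound_list = String::new();
    for snippet in kept_bounds {
        new_bound_list.push_str(" + ");
        new_bound_list.push_str(snippet);
    }
    new_bound_list.replacen(" +", ":", 1)
}

fn main() {
    // For `trait Tr4: !SuperA + SuperB + !SuperC + SuperD {}` the negative bounds are
    // dropped and the rest becomes the replacement for the whole bound list.
    assert_eq!(suggested_bound_list(&["SuperB", "SuperD"]), ": SuperB + SuperD");
    // With no surviving bounds the suggestion removes the list entirely.
    assert_eq!(suggested_bound_list(&[]), "");
}
```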

@ -0,0 +1,3 @@
#![deny(intra_doc_link_resolution_failure)]
//! A [`char`] and its [`char::len_utf8`].


@ -0,0 +1,14 @@
// run-rustfix
trait Tr {} //~ ERROR negative trait bounds are not supported
trait Tr2: SuperA {} //~ ERROR negative trait bounds are not supported
trait Tr3: SuperB {} //~ ERROR negative trait bounds are not supported
trait Tr4: SuperB + SuperD {}
trait Tr5 {}
trait SuperA {}
trait SuperB {}
trait SuperC {}
trait SuperD {}
fn main() {}


@ -0,0 +1,16 @@
// run-rustfix
trait Tr: !SuperA {} //~ ERROR negative trait bounds are not supported
trait Tr2: SuperA + !SuperB {} //~ ERROR negative trait bounds are not supported
trait Tr3: !SuperA + SuperB {} //~ ERROR negative trait bounds are not supported
trait Tr4: !SuperA + SuperB //~ ERROR negative trait bounds are not supported
+ !SuperC + SuperD {}
trait Tr5: !SuperA //~ ERROR negative trait bounds are not supported
+ !SuperB {}
trait SuperA {}
trait SuperB {}
trait SuperC {}
trait SuperD {}
fn main() {}


@ -0,0 +1,42 @@
error: negative trait bounds are not supported
--> $DIR/issue-33418.rs:3:9
|
LL | trait Tr: !SuperA {} //~ ERROR negative trait bounds are not supported
| ^^^^^^^^^ help: remove the trait bound
error: negative trait bounds are not supported
--> $DIR/issue-33418.rs:4:19
|
LL | trait Tr2: SuperA + !SuperB {} //~ ERROR negative trait bounds are not supported
| ---------^^^^^^^^^
| |
| help: remove the trait bound
error: negative trait bounds are not supported
--> $DIR/issue-33418.rs:5:10
|
LL | trait Tr3: !SuperA + SuperB {} //~ ERROR negative trait bounds are not supported
| ^^^^^^^^^---------
| |
| help: remove the trait bound
error: negative trait bounds are not supported
--> $DIR/issue-33418.rs:6:10
|
LL | trait Tr4: !SuperA + SuperB //~ ERROR negative trait bounds are not supported
| __________-^^^^^^^^
LL | | + !SuperC + SuperD {}
| |_____^^^^^^^^^________- help: remove the trait bounds
error: negative trait bounds are not supported
--> $DIR/issue-33418.rs:8:10
|
LL | trait Tr5: !SuperA //~ ERROR negative trait bounds are not supported
| __________-^^^^^^^^
LL | | + !SuperB {}
| | ^^^^^^^^-
| |_____________|
| help: remove the trait bounds
error: aborting due to 5 previous errors