
Auto merge of #69094 - Dylan-DPC:rollup-4qe7uv1, r=Dylan-DPC

Rollup of 8 pull requests

Successful merges:

 - #67585 (Improve `char::is_ascii_*` codegen)
 - #68914 (Speed up `SipHasher128`.)
 - #68994 (rustbuild: include channel in sanitizers installed name)
 - #69032 (ICE in nightly-2020-02-08: handle TerminatorKind::Yield in librustc_mir::transform::promote_consts::Validator method)
 - #69034 (parser: Remove `Parser::prev_token_kind`)
 - #69042 (Remove backtrace header text)
 - #69059 (Remove a few unused objects)
 - #69089 (Properly use the darwin archive format on Apple targets)

Failed merges:

r? @ghost
bors 2020-02-12 16:25:13 +00:00
commit 2d2be57097
21 changed files with 199 additions and 166 deletions

View file

@@ -121,9 +121,9 @@ checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
 [[package]]
 name = "backtrace"
-version = "0.3.40"
+version = "0.3.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "924c76597f0d9ca25d762c25a4d369d51267536465dc5064bdf0eb073ed477ea"
+checksum = "e4036b9bf40f3cf16aba72a3d65e8a520fc4bafcdc7079aea8f848c58c5b5536"
 dependencies = [
  "backtrace-sys",
  "cfg-if",

View file

@@ -571,7 +571,7 @@ impl Step for Sanitizers {
         }
         let out_dir = builder.native_dir(self.target).join("sanitizers");
-        let runtimes = supported_sanitizers(&out_dir, self.target);
+        let runtimes = supported_sanitizers(&out_dir, self.target, &builder.config.channel);
         if runtimes.is_empty() {
             return runtimes;
         }
@@ -635,7 +635,11 @@ pub struct SanitizerRuntime {
 }
 /// Returns sanitizers available on a given target.
-fn supported_sanitizers(out_dir: &Path, target: Interned<String>) -> Vec<SanitizerRuntime> {
+fn supported_sanitizers(
+    out_dir: &Path,
+    target: Interned<String>,
+    channel: &str,
+) -> Vec<SanitizerRuntime> {
     let mut result = Vec::new();
     match &*target {
         "x86_64-apple-darwin" => {
@@ -644,7 +648,7 @@ fn supported_sanitizers(out_dir: &Path, target: Interned<String>) -> Vec<Sanitiz
                     cmake_target: format!("clang_rt.{}_osx_dynamic", s),
                     path: out_dir
                         .join(&format!("build/lib/darwin/libclang_rt.{}_osx_dynamic.dylib", s)),
-                    name: format!("librustc_rt.{}.dylib", s),
+                    name: format!("librustc-{}_rt.{}.dylib", channel, s),
                 });
             }
         }
@@ -653,7 +657,7 @@ fn supported_sanitizers(out_dir: &Path, target: Interned<String>) -> Vec<Sanitiz
                 result.push(SanitizerRuntime {
                     cmake_target: format!("clang_rt.{}-x86_64", s),
                     path: out_dir.join(&format!("build/lib/linux/libclang_rt.{}-x86_64.a", s)),
-                    name: format!("librustc_rt.{}.a", s),
+                    name: format!("librustc-{}_rt.{}.a", channel, s),
                 });
             }
         }
@@ -662,7 +666,7 @@ fn supported_sanitizers(out_dir: &Path, target: Interned<String>) -> Vec<Sanitiz
                 result.push(SanitizerRuntime {
                     cmake_target: format!("clang_rt.{}-x86_64", s),
                     path: out_dir.join(&format!("build/lib/fuchsia/libclang_rt.{}-x86_64.a", s)),
-                    name: format!("librustc_rt.{}.a", s),
+                    name: format!("librustc-{}_rt.{}.a", channel, s),
                 });
             }
         }
@@ -671,7 +675,7 @@ fn supported_sanitizers(out_dir: &Path, target: Interned<String>) -> Vec<Sanitiz
                 result.push(SanitizerRuntime {
                     cmake_target: format!("clang_rt.{}-aarch64", s),
                     path: out_dir.join(&format!("build/lib/fuchsia/libclang_rt.{}-aarch64.a", s)),
-                    name: format!("librustc_rt.{}.a", s),
+                    name: format!("librustc-{}_rt.{}.a", channel, s),
                 });
             }
         }

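The net effect of the rustbuild change above is that installed sanitizer runtimes now carry the release channel in their file names. A small standalone sketch of the naming scheme (the helper and its arguments are illustrative, not part of rustbuild):

// Illustrative helper, not rustbuild code: shows the installed-name pattern
// produced above for a given channel, sanitizer, and platform.
fn installed_runtime_name(channel: &str, sanitizer: &str, apple: bool) -> String {
    let ext = if apple { "dylib" } else { "a" };
    format!("librustc-{}_rt.{}.{}", channel, sanitizer, ext)
}

fn main() {
    assert_eq!(installed_runtime_name("nightly", "asan", false), "librustc-nightly_rt.asan.a");
    assert_eq!(installed_runtime_name("dev", "tsan", true), "librustc-dev_rt.tsan.dylib");
    println!("channel-qualified sanitizer runtime names");
}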
View file

@@ -1075,7 +1075,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_alphabetic(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_alphabetic()
+        match *self {
+            'A'..='Z' | 'a'..='z' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII uppercase character:
@@ -1108,7 +1111,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_uppercase(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_uppercase()
+        match *self {
+            'A'..='Z' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII lowercase character:
@@ -1141,7 +1147,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_lowercase(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_lowercase()
+        match *self {
+            'a'..='z' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII alphanumeric character:
@@ -1177,7 +1186,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_alphanumeric(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_alphanumeric()
+        match *self {
+            '0'..='9' | 'A'..='Z' | 'a'..='z' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII decimal digit:
@@ -1210,7 +1222,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_digit(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_digit()
+        match *self {
+            '0'..='9' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII hexadecimal digit:
@@ -1246,7 +1261,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_hexdigit(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_hexdigit()
+        match *self {
+            '0'..='9' | 'A'..='F' | 'a'..='f' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII punctuation character:
@@ -1283,7 +1301,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_punctuation(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_punctuation()
+        match *self {
+            '!'..='/' | ':'..='@' | '['..='`' | '{'..='~' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII graphic character:
@@ -1316,7 +1337,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_graphic(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_graphic()
+        match *self {
+            '!'..='~' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII whitespace character:
@@ -1366,7 +1390,10 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
    #[inline]
     pub const fn is_ascii_whitespace(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_whitespace()
+        match *self {
+            '\t' | '\n' | '\x0C' | '\r' | ' ' => true,
+            _ => false,
+        }
     }
     /// Checks if the value is an ASCII control character:
@@ -1401,6 +1428,9 @@ impl char {
     #[rustc_const_unstable(feature = "const_ascii_ctype_on_intrinsics", issue = "68983")]
     #[inline]
     pub const fn is_ascii_control(&self) -> bool {
-        self.is_ascii() && (*self as u8).is_ascii_control()
+        match *self {
+            '\0'..='\x1F' | '\x7F' => true,
+            _ => false,
+        }
     }
 }

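The codegen improvement comes from replacing the `is_ascii()` check plus `u8` cast with a single range match, which lowers to plain comparisons. A standalone sketch (free functions here, not the libcore methods) checking that both formulations agree:

// Standalone sketch: the old and new strategies for `is_ascii_alphabetic`,
// written as free functions for comparison. Names are illustrative only.
fn is_ascii_alphabetic_old(c: char) -> bool {
    c.is_ascii() && (c as u8).is_ascii_alphabetic()
}

fn is_ascii_alphabetic_new(c: char) -> bool {
    match c {
        'A'..='Z' | 'a'..='z' => true,
        _ => false,
    }
}

fn main() {
    for c in ['a', 'Z', '0', '{', 'é', '\u{10FFFF}'].iter().copied() {
        assert_eq!(is_ascii_alphabetic_old(c), is_ascii_alphabetic_new(c));
    }
    println!("both formulations agree");
}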
View file

@@ -339,8 +339,6 @@ impl TaintDirections {
     }
 }
-pub struct ConstraintInfo {}
 impl<'tcx> RegionConstraintCollector<'tcx> {
     pub fn new() -> Self {
         Self::default()

View file

@@ -459,6 +459,7 @@ pub enum ArchiveKind {
     Other,
     K_GNU,
     K_BSD,
+    K_DARWIN,
     K_COFF,
 }

View file

@@ -69,6 +69,7 @@ impl FromStr for ArchiveKind {
         match s {
             "gnu" => Ok(ArchiveKind::K_GNU),
             "bsd" => Ok(ArchiveKind::K_BSD),
+            "darwin" => Ok(ArchiveKind::K_DARWIN),
             "coff" => Ok(ArchiveKind::K_COFF),
             _ => Err(()),
         }

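The two hunks above thread a new "darwin" archive kind from the Rust-side enum through the string parser; the target spec change further down switches Apple targets to it. A minimal sketch of that string-to-enum mapping, using local stand-in types rather than the compiler's:

use std::str::FromStr;

// Local stand-in types for illustration; the real definitions live in the
// compiler and its LLVM wrapper.
#[allow(dead_code, non_camel_case_types)]
#[derive(Debug, PartialEq)]
enum ArchiveKind {
    Other,
    K_GNU,
    K_BSD,
    K_DARWIN,
    K_COFF,
}

impl FromStr for ArchiveKind {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, ()> {
        match s {
            "gnu" => Ok(ArchiveKind::K_GNU),
            "bsd" => Ok(ArchiveKind::K_BSD),
            "darwin" => Ok(ArchiveKind::K_DARWIN),
            "coff" => Ok(ArchiveKind::K_COFF),
            _ => Err(()),
        }
    }
}

fn main() {
    // Apple target specs now request the darwin format instead of bsd.
    assert_eq!("darwin".parse::<ArchiveKind>(), Ok(ArchiveKind::K_DARWIN));
    assert_eq!("bsd".parse::<ArchiveKind>(), Ok(ArchiveKind::K_BSD));
}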
View file

@@ -765,6 +765,9 @@ fn link_sanitizer_runtime(sess: &Session, crate_type: config::CrateType, linker:
     let default_sysroot = filesearch::get_or_default_sysroot();
     let default_tlib =
         filesearch::make_target_lib_path(&default_sysroot, sess.opts.target_triple.triple());
+    let channel = option_env!("CFG_RELEASE_CHANNEL")
+        .map(|channel| format!("-{}", channel))
+        .unwrap_or_default();
     match sess.opts.target_triple.triple() {
         "x86_64-apple-darwin" => {
@@ -772,13 +775,13 @@ fn link_sanitizer_runtime(sess: &Session, crate_type: config::CrateType, linker:
             // LLVM will link to `@rpath/*.dylib`, so we need to specify an
             // rpath to the library as well (the rpath should be absolute, see
             // PR #41352 for details).
-            let libname = format!("rustc_rt.{}", name);
+            let libname = format!("rustc{}_rt.{}", channel, name);
             let rpath = default_tlib.to_str().expect("non-utf8 component in path");
             linker.args(&["-Wl,-rpath".into(), "-Xlinker".into(), rpath.into()]);
             linker.link_dylib(Symbol::intern(&libname));
         }
         "x86_64-unknown-linux-gnu" | "x86_64-fuchsia" | "aarch64-fuchsia" => {
-            let filename = format!("librustc_rt.{}.a", name);
+            let filename = format!("librustc{}_rt.{}.a", channel, name);
             let path = default_tlib.join(&filename);
             linker.link_whole_rlib(&path);
         }

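The linker side mirrors rustbuild's naming: the channel comes from CFG_RELEASE_CHANNEL at compile time and collapses to an empty suffix when unset. A rough sketch of the name computation only (hypothetical helper, not the `link_sanitizer_runtime` code):

// Hypothetical helper showing only the name computation from the hunk above.
fn static_runtime_file_name(channel: Option<&str>, sanitizer: &str) -> String {
    let channel = channel.map(|c| format!("-{}", c)).unwrap_or_default();
    format!("librustc{}_rt.{}.a", channel, sanitizer)
}

fn main() {
    assert_eq!(static_runtime_file_name(Some("nightly"), "asan"), "librustc-nightly_rt.asan.a");
    // With no channel configured, the old unsuffixed name is produced.
    assert_eq!(static_runtime_file_name(None, "asan"), "librustc_rt.asan.a");
}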
View file

@@ -0,0 +1,4 @@
+fn main() {
+    println!("cargo:rerun-if-changed=build.rs");
+    println!("cargo:rerun-if-env-changed=CFG_RELEASE_CHANNEL");
+}

View file

@@ -4,7 +4,6 @@ use std::cmp;
 use std::hash::Hasher;
 use std::mem;
 use std::ptr;
-use std::slice;
 #[cfg(test)]
 mod tests;
@@ -52,46 +51,17 @@ macro_rules! compress {
     }};
 }
-/// Loads an integer of the desired type from a byte stream, in LE order. Uses
-/// `copy_nonoverlapping` to let the compiler generate the most efficient way
-/// to load it from a possibly unaligned address.
-///
-/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
-macro_rules! load_int_le {
-    ($buf:expr, $i:expr, $int_ty:ident) => {{
-        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
-        let mut data = 0 as $int_ty;
-        ptr::copy_nonoverlapping(
-            $buf.get_unchecked($i),
-            &mut data as *mut _ as *mut u8,
-            mem::size_of::<$int_ty>(),
-        );
-        data.to_le()
-    }};
-}
-/// Loads an u64 using up to 7 bytes of a byte slice.
-///
-/// Unsafe because: unchecked indexing at start..start+len
+/// Loads up to 8 bytes from a byte-slice into a little-endian u64.
 #[inline]
-unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
-    debug_assert!(len < 8);
-    let mut i = 0; // current byte index (from LSB) in the output u64
-    let mut out = 0;
-    if i + 3 < len {
-        out = u64::from(load_int_le!(buf, start + i, u32));
-        i += 4;
-    }
-    if i + 1 < len {
-        out |= u64::from(load_int_le!(buf, start + i, u16)) << (i * 8);
-        i += 2
-    }
-    if i < len {
-        out |= u64::from(*buf.get_unchecked(start + i)) << (i * 8);
-        i += 1;
-    }
-    debug_assert_eq!(i, len);
-    out
+fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+    assert!(len <= 8 && start + len <= buf.len());
+    let mut out = 0u64;
+    unsafe {
+        let out_ptr = &mut out as *mut _ as *mut u8;
+        ptr::copy_nonoverlapping(buf.as_ptr().offset(start as isize), out_ptr, len);
+    }
+    out.to_le()
 }
 impl SipHasher128 {
@@ -122,42 +92,76 @@ impl SipHasher128 {
         self.state.v1 ^= 0xee;
     }
-    // Specialized write function that is only valid for buffers with len <= 8.
-    // It's used to force inlining of write_u8 and write_usize, those would normally be inlined
-    // except for composite types (that includes slices and str hashing because of delimiter).
-    // Without this extra push the compiler is very reluctant to inline delimiter writes,
-    // degrading performance substantially for the most common use cases.
+    // A specialized write function for values with size <= 8.
+    //
+    // The hashing of multi-byte integers depends on endianness. E.g.:
+    // - little-endian: `write_u32(0xDDCCBBAA)` == `write([0xAA, 0xBB, 0xCC, 0xDD])`
+    // - big-endian: `write_u32(0xDDCCBBAA)` == `write([0xDD, 0xCC, 0xBB, 0xAA])`
+    //
+    // This function does the right thing for little-endian hardware. On
+    // big-endian hardware `x` must be byte-swapped first to give the right
+    // behaviour. After any byte-swapping, the input must be zero-extended to
+    // 64-bits. The caller is responsible for the byte-swapping and
+    // zero-extension.
     #[inline]
-    fn short_write(&mut self, msg: &[u8]) {
-        debug_assert!(msg.len() <= 8);
-        let length = msg.len();
-        self.length += length;
+    fn short_write<T>(&mut self, _x: T, x: u64) {
+        let size = mem::size_of::<T>();
+        self.length += size;
+        // The original number must be zero-extended, not sign-extended.
+        debug_assert!(if size < 8 { x >> (8 * size) == 0 } else { true });
+        // The number of bytes needed to fill `self.tail`.
         let needed = 8 - self.ntail;
-        let fill = cmp::min(length, needed);
-        if fill == 8 {
-            self.tail = unsafe { load_int_le!(msg, 0, u64) };
-        } else {
-            self.tail |= unsafe { u8to64_le(msg, 0, fill) } << (8 * self.ntail);
-            if length < needed {
-                self.ntail += length;
-                return;
-            }
-        }
+        // SipHash parses the input stream as 8-byte little-endian integers.
+        // Inputs are put into `self.tail` until 8 bytes of data have been
+        // collected, and then that word is processed.
+        //
+        // For example, imagine that `self.tail` is 0x0000_00EE_DDCC_BBAA,
+        // `self.ntail` is 5 (because 5 bytes have been put into `self.tail`),
+        // and `needed` is therefore 3.
+        //
+        // - Scenario 1, `self.write_u8(0xFF)`: we have already zero-extended
        // the input to 0x0000_0000_0000_00FF. We now left-shift it five
        // bytes, giving 0x0000_FF00_0000_0000. We then bitwise-OR that value
        // into `self.tail`, resulting in 0x0000_FFEE_DDCC_BBAA.
        // (Zero-extension of the original input is critical in this scenario
        // because we don't want the high two bytes of `self.tail` to be
        // touched by the bitwise-OR.) `self.tail` is not yet full, so we
        // return early, after updating `self.ntail` to 6.
        //
        // - Scenario 2, `self.write_u32(0xIIHH_GGFF)`: we have already
        // zero-extended the input to 0x0000_0000_IIHH_GGFF. We now
        // left-shift it five bytes, giving 0xHHGG_FF00_0000_0000. We then
        // bitwise-OR that value into `self.tail`, resulting in
        // 0xHHGG_FFEE_DDCC_BBAA. `self.tail` is now full, and we can use it
        // to update `self.state`. (As mentioned above, this assumes a
        // little-endian machine; on a big-endian machine we would have
        // byte-swapped 0xIIHH_GGFF in the caller, giving 0xFFGG_HHII, and we
        // would then end up bitwise-ORing 0xGGHH_II00_0000_0000 into
        // `self.tail`).
        //
+        self.tail |= x << (8 * self.ntail);
+        if size < needed {
+            self.ntail += size;
+            return;
+        }
+        // `self.tail` is full, process it.
         self.state.v3 ^= self.tail;
         Sip24Rounds::c_rounds(&mut self.state);
         self.state.v0 ^= self.tail;
-        // Buffered tail is now flushed, process new input.
-        self.ntail = length - needed;
-        self.tail = unsafe { u8to64_le(msg, needed, self.ntail) };
-    }
-    #[inline(always)]
-    fn short_write_gen<T>(&mut self, x: T) {
-        let bytes =
-            unsafe { slice::from_raw_parts(&x as *const T as *const u8, mem::size_of::<T>()) };
-        self.short_write(bytes);
+        // Continuing scenario 2: we have one byte left over from the input. We
+        // set `self.ntail` to 1 and `self.tail` to `0x0000_0000_IIHH_GGFF >>
+        // 8*3`, which is 0x0000_0000_0000_00II. (Or on a big-endian machine
+        // the prior byte-swapping would leave us with 0x0000_0000_0000_00FF.)
+        //
+        // The `if` is needed to avoid shifting by 64 bits, which Rust
+        // complains about.
+        self.ntail = size - needed;
+        self.tail = if needed < 8 { x >> (8 * needed) } else { 0 };
     }
     #[inline]
@@ -182,52 +186,52 @@ impl SipHasher128 {
 impl Hasher for SipHasher128 {
     #[inline]
     fn write_u8(&mut self, i: u8) {
-        self.short_write_gen(i);
+        self.short_write(i, i as u64);
     }
     #[inline]
     fn write_u16(&mut self, i: u16) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
     #[inline]
     fn write_u32(&mut self, i: u32) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
     #[inline]
     fn write_u64(&mut self, i: u64) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
     #[inline]
     fn write_usize(&mut self, i: usize) {
-        self.short_write_gen(i);
+        self.short_write(i, i.to_le() as u64);
     }
     #[inline]
     fn write_i8(&mut self, i: i8) {
-        self.short_write_gen(i);
+        self.short_write(i, i as u8 as u64);
     }
     #[inline]
     fn write_i16(&mut self, i: i16) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u16).to_le() as u64);
     }
     #[inline]
     fn write_i32(&mut self, i: i32) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u32).to_le() as u64);
     }
     #[inline]
     fn write_i64(&mut self, i: i64) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as u64).to_le() as u64);
     }
     #[inline]
     fn write_isize(&mut self, i: isize) {
-        self.short_write_gen(i);
+        self.short_write(i, (i as usize).to_le() as u64);
     }
     #[inline]
@@ -239,7 +243,7 @@ impl Hasher for SipHasher128 {
         if self.ntail != 0 {
             needed = 8 - self.ntail;
-            self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
+            self.tail |= u8to64_le(msg, 0, cmp::min(length, needed)) << (8 * self.ntail);
             if length < needed {
                 self.ntail += length;
                 return;
@@ -257,7 +261,7 @@ impl Hasher for SipHasher128 {
         let mut i = needed;
         while i < len - left {
-            let mi = unsafe { load_int_le!(msg, i, u64) };
+            let mi = u8to64_le(msg, i, 8);
             self.state.v3 ^= mi;
             Sip24Rounds::c_rounds(&mut self.state);
@@ -266,7 +270,7 @@ impl Hasher for SipHasher128 {
             i += 8;
         }
-        self.tail = unsafe { u8to64_le(msg, i, left) };
+        self.tail = u8to64_le(msg, i, left);
         self.ntail = left;
     }

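To make the worked example in the new comments concrete, here is a standalone sketch of the little-endian partial load that `u8to64_le` performs, written with a plain shift-and-or loop (an assumption here: this is equivalent to the `copy_nonoverlapping` version on little-endian hosts, which is the case the comments walk through):

// Standalone sketch of a `u8to64_le`-style load: up to 8 bytes from a slice,
// interpreted as a little-endian integer in the low bytes of a u64.
fn u8to64_le_sketch(buf: &[u8], start: usize, len: usize) -> u64 {
    assert!(len <= 8 && start + len <= buf.len());
    let mut out = 0u64;
    for i in 0..len {
        out |= (buf[start + i] as u64) << (8 * i);
    }
    out
}

fn main() {
    // Matches the `self.tail` value used in the scenario comments above.
    let bytes = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE];
    assert_eq!(u8to64_le_sketch(&bytes, 0, 5), 0x0000_00EE_DDCC_BBAA);
    assert_eq!(u8to64_le_sketch(&bytes, 1, 2), 0x0000_0000_0000_CCBB);
    println!("little-endian partial loads behave as described");
}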
View file

@@ -120,10 +120,6 @@ pub trait Callbacks {
     }
 }
-pub struct DefaultCallbacks;
-impl Callbacks for DefaultCallbacks {}
 #[derive(Default)]
 pub struct TimePassesCallbacks {
     time_passes: bool,

View file

@@ -463,6 +463,7 @@ impl<'tcx> Validator<'_, 'tcx> {
         let terminator = self.body[loc.block].terminator();
         match &terminator.kind {
             TerminatorKind::Call { func, args, .. } => self.validate_call(func, args),
+            TerminatorKind::Yield { .. } => Err(Unpromotable),
             kind => {
                 span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
             }

View file

@@ -1,6 +1,6 @@
 use super::pat::{GateOr, PARAM_EXPECTED};
 use super::ty::{AllowPlus, RecoverQPath};
-use super::{BlockMode, Parser, PathStyle, PrevTokenKind, Restrictions, TokenType};
+use super::{BlockMode, Parser, PathStyle, Restrictions, TokenType};
 use super::{SemiColonMode, SeqSep, TokenExpectType};
 use crate::maybe_recover_from_interpolated_ty_qpath;
@@ -166,17 +166,10 @@ impl<'a> Parser<'a> {
         self.expected_tokens.push(TokenType::Operator);
         while let Some(op) = self.check_assoc_op() {
-            // Adjust the span for interpolated LHS to point to the `$lhs` token and not to what
-            // it refers to. Interpolated identifiers are unwrapped early and never show up here
-            // as `PrevTokenKind::Interpolated` so if LHS is a single identifier we always process
-            // it as "interpolated", it doesn't change the answer for non-interpolated idents.
-            let lhs_span = match (self.prev_token_kind, &lhs.kind) {
-                (PrevTokenKind::Interpolated, _) => self.prev_span,
-                (PrevTokenKind::Ident, &ExprKind::Path(None, ref path))
-                    if path.segments.len() == 1 =>
-                {
-                    self.prev_span
-                }
+            // Adjust the span for interpolated LHS to point to the `$lhs` token
+            // and not to what it refers to.
+            let lhs_span = match self.unnormalized_prev_token().kind {
+                TokenKind::Interpolated(..) => self.prev_span,
                 _ => lhs.span,
             };
@@ -535,11 +528,13 @@ impl<'a> Parser<'a> {
         expr: PResult<'a, P<Expr>>,
     ) -> PResult<'a, (Span, P<Expr>)> {
         expr.map(|e| {
-            if self.prev_token_kind == PrevTokenKind::Interpolated {
-                (self.prev_span, e)
-            } else {
-                (e.span, e)
-            }
+            (
+                match self.unnormalized_prev_token().kind {
+                    TokenKind::Interpolated(..) => self.prev_span,
+                    _ => e.span,
+                },
+                e,
+            )
         })
     }

View file

@@ -83,18 +83,6 @@ macro_rules! maybe_recover_from_interpolated_ty_qpath {
     };
 }
-#[derive(Debug, Clone, Copy, PartialEq)]
-enum PrevTokenKind {
-    DocComment,
-    Comma,
-    Plus,
-    Interpolated,
-    Eof,
-    Ident,
-    BitOr,
-    Other,
-}
 #[derive(Clone)]
 pub struct Parser<'a> {
     pub sess: &'a ParseSess,
@@ -115,9 +103,6 @@ pub struct Parser<'a> {
     /// Preferable use is through the `unnormalized_prev_token()` getter.
     /// Use span from this token if you need to concatenate it with some neighbouring spans.
     unnormalized_prev_token: Option<Token>,
-    /// Equivalent to `prev_token.kind` in simplified form.
-    /// FIXME: Remove in favor of `(unnormalized_)prev_token().kind`.
-    prev_token_kind: PrevTokenKind,
     /// Equivalent to `unnormalized_prev_token().span`.
     /// FIXME: Remove in favor of `(unnormalized_)prev_token().span`.
     pub prev_span: Span,
@@ -396,7 +381,6 @@ impl<'a> Parser<'a> {
             unnormalized_token: None,
             prev_token: Token::dummy(),
             unnormalized_prev_token: None,
-            prev_token_kind: PrevTokenKind::Other,
             prev_span: DUMMY_SP,
             restrictions: Restrictions::empty(),
             recurse_into_file_modules,
@@ -523,10 +507,11 @@ impl<'a> Parser<'a> {
                 self.bump();
                 Ok(Ident::new(name, span))
             }
-            _ => Err(if self.prev_token_kind == PrevTokenKind::DocComment {
-                self.span_fatal_err(self.prev_span, Error::UselessDocComment)
-            } else {
-                self.expected_ident_found()
+            _ => Err(match self.prev_token.kind {
+                TokenKind::DocComment(..) => {
+                    self.span_fatal_err(self.prev_span, Error::UselessDocComment)
+                }
+                _ => self.expected_ident_found(),
             }),
         }
     }
@@ -908,7 +893,7 @@ impl<'a> Parser<'a> {
     /// Advance the parser by one token.
     pub fn bump(&mut self) {
-        if self.prev_token_kind == PrevTokenKind::Eof {
+        if self.prev_token.kind == TokenKind::Eof {
            // Bumping after EOF is a bad sign, usually an infinite loop.
            let msg = "attempted to bump the parser past EOF (may be stuck in a loop)";
            self.span_bug(self.token.span, msg);
@@ -920,16 +905,6 @@ impl<'a> Parser<'a> {
        self.unnormalized_prev_token = self.unnormalized_token.take();
        // Update fields derived from the previous token.
-       self.prev_token_kind = match self.prev_token.kind {
-           token::DocComment(..) => PrevTokenKind::DocComment,
-           token::Comma => PrevTokenKind::Comma,
-           token::BinOp(token::Plus) => PrevTokenKind::Plus,
-           token::BinOp(token::Or) => PrevTokenKind::BitOr,
-           token::Interpolated(..) => PrevTokenKind::Interpolated,
-           token::Eof => PrevTokenKind::Eof,
-           token::Ident(..) => PrevTokenKind::Ident,
-           _ => PrevTokenKind::Other,
-       };
        self.prev_span = self.unnormalized_prev_token().span;
        self.expected_tokens.clear();
@@ -949,7 +924,6 @@ impl<'a> Parser<'a> {
        self.unnormalized_prev_token = self.unnormalized_token.take();
        // Update fields derived from the previous token.
-       self.prev_token_kind = PrevTokenKind::Other;
        self.prev_span = self.unnormalized_prev_token().span.with_hi(span.lo());
        self.expected_tokens.clear();

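The parser changes above and below all follow one pattern: rather than caching a simplified `PrevTokenKind` flag on every bump, call sites now inspect the previous token's kind directly. A generic sketch of that pattern with local stand-in types (not rustc's):

// Local stand-in types for illustration; rustc's Token/TokenKind are richer.
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq)]
enum TokenKind {
    DocComment(String),
    Plus,
    Eof,
    Other,
}

struct Parser {
    prev_token: TokenKind,
}

impl Parser {
    // Replaces a cached `prev_token_kind == PrevTokenKind::DocComment` check.
    fn prev_is_doc_comment(&self) -> bool {
        matches!(self.prev_token, TokenKind::DocComment(..))
    }
}

fn main() {
    let p = Parser { prev_token: TokenKind::DocComment("/// useless".into()) };
    assert!(p.prev_is_doc_comment());
    let q = Parser { prev_token: TokenKind::Plus };
    assert!(!q.prev_is_doc_comment());
}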
View file

@@ -2,7 +2,7 @@ use super::diagnostics::Error;
 use super::expr::LhsExpr;
 use super::pat::GateOr;
 use super::path::PathStyle;
-use super::{BlockMode, Parser, PrevTokenKind, Restrictions, SemiColonMode};
+use super::{BlockMode, Parser, Restrictions, SemiColonMode};
 use crate::maybe_whole;
 use crate::DirectoryOwnership;
@@ -190,7 +190,7 @@ impl<'a> Parser<'a> {
     /// Also error if the previous token was a doc comment.
     fn error_outer_attrs(&self, attrs: &[Attribute]) {
         if !attrs.is_empty() {
-            if self.prev_token_kind == PrevTokenKind::DocComment {
+            if matches!(self.prev_token.kind, TokenKind::DocComment(..)) {
                 self.span_fatal_err(self.prev_span, Error::UselessDocComment).emit();
             } else if attrs.iter().any(|a| a.style == AttrStyle::Outer) {
                 self.struct_span_err(self.token.span, "expected statement after outer attribute")

View file

@@ -1,5 +1,5 @@
 use super::item::ParamCfg;
-use super::{Parser, PathStyle, PrevTokenKind, TokenType};
+use super::{Parser, PathStyle, TokenType};
 use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
@@ -14,7 +14,7 @@ use syntax::ast::{
 };
 use syntax::ast::{Mac, Mutability};
 use syntax::ptr::P;
-use syntax::token::{self, Token};
+use syntax::token::{self, Token, TokenKind};
 /// Any `?` or `?const` modifiers that appear at the start of a bound.
 struct BoundModifiers {
@@ -196,7 +196,7 @@ impl<'a> Parser<'a> {
         let mut trailing_plus = false;
         let (ts, trailing) = self.parse_paren_comma_seq(|p| {
             let ty = p.parse_ty()?;
-            trailing_plus = p.prev_token_kind == PrevTokenKind::Plus;
+            trailing_plus = p.prev_token.kind == TokenKind::BinOp(token::Plus);
             Ok(ty)
         })?;
@@ -320,7 +320,7 @@ impl<'a> Parser<'a> {
     fn parse_impl_ty(&mut self, impl_dyn_multi: &mut bool) -> PResult<'a, TyKind> {
         // Always parse bounds greedily for better error recovery.
         let bounds = self.parse_generic_bounds(None)?;
-        *impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
+        *impl_dyn_multi = bounds.len() > 1 || self.prev_token.kind == TokenKind::BinOp(token::Plus);
         Ok(TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds))
     }
@@ -340,7 +340,7 @@ impl<'a> Parser<'a> {
         self.bump(); // `dyn`
         // Always parse bounds greedily for better error recovery.
         let bounds = self.parse_generic_bounds(None)?;
-        *impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
+        *impl_dyn_multi = bounds.len() > 1 || self.prev_token.kind == TokenKind::BinOp(token::Plus);
         Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn))
     }

View file

@@ -26,7 +26,7 @@ pub fn opts() -> TargetOptions {
         has_rpath: true,
         dll_prefix: "lib".to_string(),
         dll_suffix: ".dylib".to_string(),
-        archive_format: "bsd".to_string(),
+        archive_format: "darwin".to_string(),
         pre_link_args: LinkArgs::new(),
         has_elf_tls: version >= (10, 7),
         abi_return_struct_as_int: true,

View file

@@ -27,7 +27,7 @@ hashbrown = { version = "0.6.2", default-features = false, features = ['rustc-de
 [dependencies.backtrace_rs]
 package = "backtrace"
-version = "0.3.37"
+version = "0.3.44"
 default-features = false # without the libstd `backtrace` feature, stub out everything
 features = [ "rustc-dep-of-std" ] # enable build support for integrating into libstd

View file

@@ -70,6 +70,7 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::
     let mut print_path = move |fmt: &mut fmt::Formatter<'_>, bows: BytesOrWideString<'_>| {
         output_filename(fmt, bows, print_fmt, cwd.as_ref())
     };
+    write!(fmt, "stack backtrace:\n")?;
     let mut bt_fmt = BacktraceFmt::new(fmt, print_fmt, &mut print_path);
     bt_fmt.add_context()?;
     let mut idx = 0;

View file

@@ -38,6 +38,7 @@ enum class LLVMRustArchiveKind {
   Other,
   GNU,
   BSD,
+  DARWIN,
   COFF,
 };
@@ -47,6 +48,8 @@ static Archive::Kind fromRust(LLVMRustArchiveKind Kind) {
     return Archive::K_GNU;
   case LLVMRustArchiveKind::BSD:
     return Archive::K_BSD;
+  case LLVMRustArchiveKind::DARWIN:
+    return Archive::K_DARWIN;
   case LLVMRustArchiveKind::COFF:
     return Archive::K_COFF;
   default:

View file

@@ -0,0 +1,18 @@
+// This issue reproduces an ICE on compile
+// Fails on 2020-02-08 nightly
+// regressed commit: https://github.com/rust-lang/rust/commit/f8fd4624474a68bd26694eff3536b9f3a127b2d3
+//
+// check-pass
+
+#![feature(generator_trait)]
+#![feature(generators)]
+
+use std::ops::Generator;
+
+fn gen() -> impl Generator<usize> {
+    |_: usize| {
+        println!("-> {}", yield);
+    }
+}
+
+fn main() {}

View file

@@ -16,9 +16,9 @@ use std::str;
 fn main() {
     let args: Vec<String> = env::args().collect();
     if args.len() >= 2 && args[1] == "force" {
-        println!("{}", std::backtrace::Backtrace::force_capture());
+        println!("stack backtrace:\n{}", std::backtrace::Backtrace::force_capture());
     } else if args.len() >= 2 {
-        println!("{}", std::backtrace::Backtrace::capture());
+        println!("stack backtrace:\n{}", std::backtrace::Backtrace::capture());
     } else {
         runtest(&args[0]);
         println!("test ok");
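
Taken together with the libstd hunk earlier, the behaviour change is that the "stack backtrace:" header no longer comes from the captured backtrace's Display output; the default panic printer writes it itself, and callers such as this test add it when they want it. A small illustration (assumes a nightly from that era, since std::backtrace was still gated behind the unstable backtrace feature):

// Illustration only: at the time of this commit, std::backtrace was unstable.
#![feature(backtrace)]

use std::backtrace::Backtrace;

fn main() {
    let bt = Backtrace::force_capture();
    // Display now yields just the frames, so supply the header explicitly.
    println!("stack backtrace:\n{}", bt);
}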