
Merge branch 'master' into copied

Konrad Borowski 2018-12-23 16:47:11 +01:00 committed by GitHub
commit 8ac5380ea0
1381 changed files with 25073 additions and 12429 deletions

.gitmodules

@ -5,9 +5,6 @@
[submodule "src/rust-installer"]
path = src/tools/rust-installer
url = https://github.com/rust-lang/rust-installer.git
[submodule "src/liblibc"]
path = src/liblibc
url = https://github.com/rust-lang/libc.git
[submodule "src/doc/nomicon"]
path = src/doc/nomicon
url = https://github.com/rust-lang-nursery/nomicon.git
@ -23,9 +20,6 @@
[submodule "src/tools/rls"]
path = src/tools/rls
url = https://github.com/rust-lang-nursery/rls.git
[submodule "src/libcompiler_builtins"]
path = src/libcompiler_builtins
url = https://github.com/rust-lang-nursery/compiler-builtins.git
[submodule "src/tools/clippy"]
path = src/tools/clippy
url = https://github.com/rust-lang-nursery/rust-clippy.git
@ -35,9 +29,6 @@
[submodule "src/tools/miri"]
path = src/tools/miri
url = https://github.com/solson/miri.git
[submodule "src/dlmalloc"]
path = src/dlmalloc
url = https://github.com/alexcrichton/dlmalloc-rs.git
[submodule "src/doc/rust-by-example"]
path = src/doc/rust-by-example
url = https://github.com/rust-lang/rust-by-example.git
@ -61,7 +52,6 @@
path = src/tools/clang
url = https://github.com/rust-lang-nursery/clang.git
branch = rust-release-80-v2
[submodule "src/doc/rustc-guide"]
path = src/doc/rustc-guide
url = https://github.com/rust-lang/rustc-guide.git


@ -16,7 +16,7 @@ matrix:
fast_finish: true
include:
# Images used in testing PR and try-build should be run first.
- env: IMAGE=x86_64-gnu-llvm-5.0 RUST_BACKTRACE=1
- env: IMAGE=x86_64-gnu-llvm-6.0 RUST_BACKTRACE=1
if: type = pull_request OR branch = auto
- env: IMAGE=dist-x86_64-linux DEPLOY=1


@ -39,7 +39,7 @@ don't know about, so please report liberally. If you're not sure if something
is a bug or not, feel free to file a bug anyway.
**If you believe reporting your bug publicly represents a security risk to Rust users,
please follow our [instructions for reporting security vulnerabilities](https://www.rust-lang.org/security.html)**.
please follow our [instructions for reporting security vulnerabilities](https://www.rust-lang.org/policies/security)**.
If you have the chance, before reporting a bug, please [search existing
issues](https://github.com/rust-lang/rust/search?q=&type=Issues&utf8=%E2%9C%93),

Cargo.lock

File diff suppressed because it is too large.


@ -65,6 +65,10 @@ rustfmt-nightly = { path = "src/tools/rustfmt" }
# here
rustc-workspace-hack = { path = 'src/tools/rustc-workspace-hack' }
# See comments in `tools/rustc-std-workspace-core/README.md` for what's going on
# here
rustc-std-workspace-core = { path = 'src/tools/rustc-std-workspace-core' }
[patch."https://github.com/rust-lang/rust-clippy"]
clippy_lints = { path = "src/tools/clippy/clippy_lints" }
rustc_tools_util = { path = "src/tools/clippy/rustc_tools_util" }


@ -10,7 +10,7 @@ standard library, and documentation.
Read ["Installation"] from [The Book].
["Installation"]: https://doc.rust-lang.org/book/second-edition/ch01-01-installation.html
["Installation"]: https://doc.rust-lang.org/book/ch01-01-installation.html
[The Book]: https://doc.rust-lang.org/book/index.html
## Building from Source


@ -1,3 +1,14 @@
Version 1.31.1 (2018-12-20)
===========================
- [Fix Rust failing to build on `powerpc-unknown-netbsd`][56562]
- [Fix broken go-to-definition in RLS][rls/1171]
- [Fix infinite loop on hover in RLS][rls/1170]
[56562]: https://github.com/rust-lang/rust/pull/56562
[rls/1171]: https://github.com/rust-lang/rls/issues/1171
[rls/1170]: https://github.com/rust-lang/rls/pull/1170
Version 1.31.0 (2018-12-06)
==========================


@ -2,6 +2,7 @@
authors = ["The Rust Project Developers"]
name = "bootstrap"
version = "0.0.0"
edition = "2018"
[lib]
name = "bootstrap"


@ -678,10 +678,10 @@ class RustBuild(object):
print("Updating submodule", module)
run(["git", "submodule", "-q", "sync", module],
run(["git", "submodule", "-q", "sync", "--progress", module],
cwd=self.rust_root, verbose=self.verbose)
run(["git", "submodule", "update",
"--init", "--recursive", module],
"--init", "--recursive", "--progress", module],
cwd=self.rust_root, verbose=self.verbose)
run(["git", "reset", "-q", "--hard"],
cwd=module_path, verbose=self.verbose)


@ -21,20 +21,20 @@ use std::path::{Path, PathBuf};
use std::process::Command;
use std::time::{Duration, Instant};
use cache::{Cache, Interned, INTERNER};
use check;
use compile;
use dist;
use doc;
use flags::Subcommand;
use install;
use native;
use test;
use tool;
use util::{add_lib_path, exe, libdir};
use {Build, DocTests, Mode, GitRepo};
use crate::cache::{Cache, Interned, INTERNER};
use crate::check;
use crate::compile;
use crate::dist;
use crate::doc;
use crate::flags::Subcommand;
use crate::install;
use crate::native;
use crate::test;
use crate::tool;
use crate::util::{add_lib_path, exe, libdir};
use crate::{Build, DocTests, Mode, GitRepo};
pub use Compiler;
pub use crate::Compiler;
use petgraph::graph::NodeIndex;
use petgraph::Graph;
@ -389,7 +389,6 @@ impl<'a> Builder<'a> {
test::UiFullDeps,
test::RunPassFullDeps,
test::RunFailFullDeps,
test::CompileFailFullDeps,
test::Rustdoc,
test::Pretty,
test::RunPassPretty,
@ -417,6 +416,7 @@ impl<'a> Builder<'a> {
test::Rustfmt,
test::Miri,
test::Clippy,
test::CompiletestTest,
test::RustdocJS,
test::RustdocTheme,
// Run bootstrap close to the end as it's unlikely to fail
@ -685,6 +685,11 @@ impl<'a> Builder<'a> {
.env("RUSTDOC_REAL", self.rustdoc(host))
.env("RUSTDOC_CRATE_VERSION", self.rust_version())
.env("RUSTC_BOOTSTRAP", "1");
// Remove make-related flags that can cause jobserver problems.
cmd.env_remove("MAKEFLAGS");
cmd.env_remove("MFLAGS");
if let Some(linker) = self.linker(host) {
cmd.env("RUSTC_TARGET_LINKER", linker);
}
@ -793,7 +798,7 @@ impl<'a> Builder<'a> {
}
// Set a flag for `check` so that certain build scripts can do less work
// (e.g. not building/requiring LLVM).
// (e.g., not building/requiring LLVM).
if cmd == "check" {
cargo.env("RUST_CHECK", "1");
}
@ -923,12 +928,12 @@ impl<'a> Builder<'a> {
cargo.env("RUSTC_FORCE_UNSTABLE", "1");
// Currently the compiler depends on crates from crates.io, and
// then other crates can depend on the compiler (e.g. proc-macro
// then other crates can depend on the compiler (e.g., proc-macro
// crates). Let's say, for example that rustc itself depends on the
// bitflags crate. If an external crate then depends on the
// bitflags crate as well, we need to make sure they don't
// conflict, even if they pick the same version of bitflags. We'll
// want to make sure that e.g. a plugin and rustc each get their
// want to make sure that e.g., a plugin and rustc each get their
// own copy of bitflags.
// Cargo ensures that this works in general through the -C metadata
@ -1247,7 +1252,7 @@ impl<'a> Builder<'a> {
#[cfg(test)]
mod __test {
use super::*;
use config::Config;
use crate::config::Config;
use std::thread;
fn configure(host: &[&str], target: &[&str]) -> Config {


@ -23,7 +23,7 @@ use std::path::{Path, PathBuf};
use std::sync::Mutex;
use std::cmp::{PartialOrd, Ord, Ordering};
use builder::Step;
use crate::builder::Step;
pub struct Interned<T>(usize, PhantomData<*const T>);


@ -39,9 +39,9 @@ use std::process::Command;
use build_helper::output;
use cc;
use {Build, GitRepo};
use config::Target;
use cache::Interned;
use crate::{Build, GitRepo};
use crate::config::Target;
use crate::cache::Interned;
// The `cc` crate doesn't provide a way to obtain a path to the detected archiver,
// so use some simplified logic here. First we respect the environment variable `AR`, then
@ -143,7 +143,10 @@ fn set_compiler(cfg: &mut cc::Build,
// compiler already takes into account the triple in question.
t if t.contains("android") => {
if let Some(ndk) = config.and_then(|c| c.ndk.as_ref()) {
let target = target.replace("armv7", "arm");
let target = target.replace("armv7neon", "arm")
.replace("armv7", "arm")
.replace("thumbv7neon", "arm")
.replace("thumbv7", "arm");
let compiler = format!("{}-{}", target, compiler.clang());
cfg.compiler(ndk.join("bin").join(compiler));
}


@ -20,11 +20,11 @@ use std::process::Command;
use build_helper::output;
use Build;
use config::Config;
use crate::Build;
use crate::config::Config;
// The version number
pub const CFG_RELEASE_NUM: &str = "1.32.0";
pub const CFG_RELEASE_NUM: &str = "1.33.0";
pub struct GitInfo {
inner: Option<Info>,


@ -10,11 +10,12 @@
//! Implementation of compiling the compiler and standard library, in "check" mode.
use compile::{run_cargo, std_cargo, test_cargo, rustc_cargo, rustc_cargo_env, add_to_sysroot};
use builder::{RunConfig, Builder, ShouldRun, Step};
use tool::{prepare_tool_cargo, SourceType};
use {Compiler, Mode};
use cache::{INTERNER, Interned};
use crate::compile::{run_cargo, std_cargo, test_cargo, rustc_cargo, rustc_cargo_env,
add_to_sysroot};
use crate::builder::{RunConfig, Builder, ShouldRun, Step};
use crate::tool::{prepare_tool_cargo, SourceType};
use crate::{Compiler, Mode};
use crate::cache::{INTERNER, Interned};
use std::path::PathBuf;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]


@ -19,7 +19,7 @@ use std::fs;
use std::io::{self, ErrorKind};
use std::path::Path;
use Build;
use crate::Build;
pub fn clean(build: &Build, all: bool) {
rm_rf("tmp".as_ref());


@ -18,7 +18,7 @@
use std::borrow::Cow;
use std::env;
use std::fs::{self, File};
use std::fs;
use std::io::BufReader;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
@ -29,12 +29,12 @@ use build_helper::{output, mtime, up_to_date};
use filetime::FileTime;
use serde_json;
use util::{exe, libdir, is_dylib};
use {Compiler, Mode, GitRepo};
use native;
use crate::util::{exe, libdir, is_dylib};
use crate::{Compiler, Mode, GitRepo};
use crate::native;
use cache::{INTERNER, Interned};
use builder::{Step, RunConfig, ShouldRun, Builder};
use crate::cache::{INTERNER, Interned};
use crate::builder::{Step, RunConfig, ShouldRun, Builder};
#[derive(Debug, PartialOrd, Ord, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Std {
@ -152,11 +152,12 @@ pub fn std_cargo(builder: &Builder,
if builder.no_std(target) == Some(true) {
// for no-std targets we only compile a few no_std crates
cargo.arg("--features").arg("c mem")
cargo
.args(&["-p", "alloc"])
.args(&["-p", "compiler_builtins"])
.arg("--manifest-path")
.arg(builder.src.join("src/rustc/compiler_builtins_shim/Cargo.toml"));
.arg(builder.src.join("src/liballoc/Cargo.toml"))
.arg("--features")
.arg("compiler-builtins-mem");
} else {
let features = builder.std_features();
@ -707,7 +708,7 @@ impl Step for CodegenBackend {
}
let stamp = codegen_backend_stamp(builder, compiler, target, backend);
let codegen_backend = codegen_backend.to_str().unwrap();
t!(t!(File::create(&stamp)).write_all(codegen_backend.as_bytes()));
t!(fs::write(&stamp, &codegen_backend));
}
}
@ -796,8 +797,7 @@ fn copy_codegen_backends_to_sysroot(builder: &Builder,
for backend in builder.config.rust_codegen_backends.iter() {
let stamp = codegen_backend_stamp(builder, compiler, target, *backend);
let mut dylib = String::new();
t!(t!(File::open(&stamp)).read_to_string(&mut dylib));
let dylib = t!(fs::read_to_string(&stamp));
let file = Path::new(&dylib);
let filename = file.file_name().unwrap().to_str().unwrap();
// change `librustc_codegen_llvm-xxxxxx.so` to `librustc_codegen_llvm-llvm.so`
@ -1137,10 +1137,7 @@ pub fn run_cargo(builder: &Builder,
// contents (the list of files to copy) is different or if any dep's mtime
// is newer then we rewrite the stamp file.
deps.sort();
let mut stamp_contents = Vec::new();
if let Ok(mut f) = File::open(stamp) {
t!(f.read_to_end(&mut stamp_contents));
}
let stamp_contents = fs::read(stamp);
let stamp_mtime = mtime(&stamp);
let mut new_contents = Vec::new();
let mut max = None;
@ -1156,7 +1153,10 @@ pub fn run_cargo(builder: &Builder,
}
let max = max.unwrap();
let max_path = max_path.unwrap();
if stamp_contents == new_contents && max <= stamp_mtime {
let contents_equal = stamp_contents
.map(|contents| contents == new_contents)
.unwrap_or_default();
if contents_equal && max <= stamp_mtime {
builder.verbose(&format!("not updating {:?}; contents equal and {:?} <= {:?}",
stamp, max, stamp_mtime));
return deps
@ -1166,7 +1166,7 @@ pub fn run_cargo(builder: &Builder,
} else {
builder.verbose(&format!("updating {:?} as deps changed", stamp));
}
t!(t!(File::create(stamp)).write_all(&new_contents));
t!(fs::write(&stamp, &new_contents));
deps
}


@ -15,17 +15,16 @@
use std::collections::{HashMap, HashSet};
use std::env;
use std::fs::{self, File};
use std::io::prelude::*;
use std::fs;
use std::path::{Path, PathBuf};
use std::process;
use std::cmp;
use num_cpus;
use toml;
use cache::{INTERNER, Interned};
use flags::Flags;
pub use flags::Subcommand;
use crate::cache::{INTERNER, Interned};
use crate::flags::Flags;
pub use crate::flags::Subcommand;
/// Global configuration for the entire build and/or bootstrap.
///
@ -416,9 +415,7 @@ impl Config {
config.run_host_only = !(flags.host.is_empty() && !flags.target.is_empty());
let toml = file.map(|file| {
let mut f = t!(File::open(&file));
let mut contents = String::new();
t!(f.read_to_string(&mut contents));
let contents = t!(fs::read_to_string(&file));
match toml::from_str(&contents) {
Ok(table) => table,
Err(err) => {


@ -107,6 +107,8 @@ v("arm-linux-androideabi-ndk", "target.arm-linux-androideabi.android-ndk",
"arm-linux-androideabi NDK standalone path")
v("armv7-linux-androideabi-ndk", "target.armv7-linux-androideabi.android-ndk",
"armv7-linux-androideabi NDK standalone path")
v("thumbv7neon-linux-androideabi-ndk", "target.thumbv7neon-linux-androideabi.android-ndk",
"thumbv7neon-linux-androideabi NDK standalone path")
v("aarch64-linux-android-ndk", "target.aarch64-linux-android.android-ndk",
"aarch64-linux-android NDK standalone path")
v("x86_64-linux-android-ndk", "target.x86_64-linux-android.android-ndk",


@ -19,20 +19,20 @@
//! pieces of `rustup.rs`!
use std::env;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::fs;
use std::io::Write;
use std::path::{PathBuf, Path};
use std::process::{Command, Stdio};
use build_helper::output;
use {Compiler, Mode, LLVM_TOOLS};
use channel;
use util::{libdir, is_dylib, exe};
use builder::{Builder, RunConfig, ShouldRun, Step};
use compile;
use tool::{self, Tool};
use cache::{INTERNER, Interned};
use crate::{Compiler, Mode, LLVM_TOOLS};
use crate::channel;
use crate::util::{libdir, is_dylib, exe};
use crate::builder::{Builder, RunConfig, ShouldRun, Step};
use crate::compile;
use crate::tool::{self, Tool};
use crate::cache::{INTERNER, Interned};
use time;
pub fn pkgname(builder: &Builder, component: &str) -> String {
@ -353,7 +353,7 @@ impl Step for Mingw {
/// Build the `rust-mingw` installer component.
///
/// This contains all the bits and pieces to run the MinGW Windows targets
/// without any extra installed software (e.g. we bundle gcc, libraries, etc).
/// without any extra installed software (e.g., we bundle gcc, libraries, etc).
fn run(self, builder: &Builder) -> Option<PathBuf> {
let host = self.host;
@ -857,12 +857,9 @@ impl Step for Src {
// (essentially libstd and all of its path dependencies)
let std_src_dirs = [
"src/build_helper",
"src/dlmalloc",
"src/liballoc",
"src/libbacktrace",
"src/libcompiler_builtins",
"src/libcore",
"src/liblibc",
"src/libpanic_abort",
"src/libpanic_unwind",
"src/librustc_asan",
@ -871,20 +868,15 @@ impl Step for Src {
"src/librustc_tsan",
"src/libstd",
"src/libunwind",
"src/rustc/compiler_builtins_shim",
"src/rustc/libc_shim",
"src/rustc/dlmalloc_shim",
"src/libtest",
"src/libterm",
"src/libprofiler_builtins",
"src/stdsimd",
"src/libproc_macro",
];
let std_src_dirs_exclude = [
"src/libcompiler_builtins/compiler-rt/test",
"src/tools/rustc-std-workspace-core",
];
copy_src_dirs(builder, &std_src_dirs[..], &std_src_dirs_exclude[..], &dst_src);
copy_src_dirs(builder, &std_src_dirs[..], &[], &dst_src);
for file in src_files.iter() {
builder.copy(&builder.src.join(file), &dst_src.join(file));
}
@ -908,7 +900,7 @@ impl Step for Src {
}
}
const CARGO_VENDOR_VERSION: &str = "0.1.19";
const CARGO_VENDOR_VERSION: &str = "0.1.22";
#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)]
pub struct PlainSourceTarball;
@ -1510,8 +1502,7 @@ impl Step for Extended {
}
let xform = |p: &Path| {
let mut contents = String::new();
t!(t!(File::open(p)).read_to_string(&mut contents));
let mut contents = t!(fs::read_to_string(p));
if rls_installer.is_none() {
contents = filter(&contents, "rls");
}
@ -1522,8 +1513,8 @@ impl Step for Extended {
contents = filter(&contents, "rustfmt");
}
let ret = tmp.join(p.file_name().unwrap());
t!(t!(File::create(&ret)).write_all(contents.as_bytes()));
return ret
t!(fs::write(&ret, &contents));
ret
};
if target.contains("apple-darwin") {
@ -1868,8 +1859,7 @@ impl Step for HashSign {
let file = builder.config.dist_gpg_password_file.as_ref().unwrap_or_else(|| {
panic!("\n\nfailed to specify `dist.gpg-password-file` in `config.toml`\n\n")
});
let mut pass = String::new();
t!(t!(File::open(&file)).read_to_string(&mut pass));
let pass = t!(fs::read_to_string(&file));
let today = output(Command::new("date").arg("+%Y-%m-%d"));


@ -18,20 +18,19 @@
//! `rustdoc`.
use std::collections::HashSet;
use std::fs::{self, File};
use std::io::prelude::*;
use std::fs;
use std::io;
use std::path::{PathBuf, Path};
use Mode;
use crate::Mode;
use build_helper::up_to_date;
use util::symlink_dir;
use builder::{Builder, Compiler, RunConfig, ShouldRun, Step};
use tool::{self, prepare_tool_cargo, Tool, SourceType};
use compile;
use cache::{INTERNER, Interned};
use config::Config;
use crate::util::symlink_dir;
use crate::builder::{Builder, Compiler, RunConfig, ShouldRun, Step};
use crate::tool::{self, prepare_tool_cargo, Tool, SourceType};
use crate::compile;
use crate::cache::{INTERNER, Interned};
use crate::config::Config;
macro_rules! book {
($($name:ident, $path:expr, $book_name:expr;)+) => {
@ -379,12 +378,11 @@ impl Step for Standalone {
let version_info = out.join("version_info.html");
if !builder.config.dry_run && !up_to_date(&version_input, &version_info) {
let mut info = String::new();
t!(t!(File::open(&version_input)).read_to_string(&mut info));
let info = info.replace("VERSION", &builder.rust_release())
.replace("SHORT_HASH", builder.rust_info.sha_short().unwrap_or(""))
.replace("STAMP", builder.rust_info.sha().unwrap_or(""));
t!(t!(File::create(&version_info)).write_all(info.as_bytes()));
let info = t!(fs::read_to_string(&version_input))
.replace("VERSION", &builder.rust_release())
.replace("SHORT_HASH", builder.rust_info.sha_short().unwrap_or(""))
.replace("STAMP", builder.rust_info.sha().unwrap_or(""));
t!(fs::write(&version_info, &info));
}
for file in t!(fs::read_dir(builder.src.join("src/doc"))) {
@ -697,9 +695,6 @@ impl Step for Rustc {
return;
}
// Build libstd docs so that we generate relative links.
builder.ensure(Std { stage, target });
// Build rustc.
builder.ensure(compile::Rustc { compiler, target });
@ -718,12 +713,16 @@ impl Step for Rustc {
// Find dependencies for top level crates.
let mut compiler_crates = HashSet::new();
for root_crate in &["rustc", "rustc_driver", "rustc_codegen_llvm"] {
for root_crate in &["rustc_driver", "rustc_codegen_llvm", "rustc_codegen_ssa"] {
let interned_root_crate = INTERNER.intern_str(root_crate);
find_compiler_crates(builder, &interned_root_crate, &mut compiler_crates);
}
for krate in &compiler_crates {
// Create all crate output directories first to make sure rustdoc uses
// relative links.
// FIXME: Cargo should probably do this itself.
t!(fs::create_dir_all(out_dir.join(krate)));
cargo.arg("-p").arg(krate);
}
@ -797,8 +796,8 @@ impl Step for Rustdoc {
return;
}
// Build libstd docs so that we generate relative links.
builder.ensure(Std { stage, target });
// Build rustc docs so that we generate relative links.
builder.ensure(Rustc { stage, target });
// Build rustdoc.
builder.ensure(tool::Rustdoc { host: compiler.host });
@ -822,6 +821,10 @@ impl Step for Rustdoc {
&[]
);
// Only include compiler crates, no dependencies of those, such as `libc`.
cargo.arg("--no-deps");
cargo.arg("-p").arg("rustdoc");
cargo.env("RUSTDOCFLAGS", "--document-private-items");
builder.run(&mut cargo);
}


@ -19,12 +19,12 @@ use std::process;
use getopts::Options;
use builder::Builder;
use config::Config;
use metadata;
use {Build, DocTests};
use crate::builder::Builder;
use crate::config::Config;
use crate::metadata;
use crate::{Build, DocTests};
use cache::{Interned, INTERNER};
use crate::cache::{Interned, INTERNER};
/// Deserialized version of all flags for this compile.
pub struct Flags {
@ -121,11 +121,11 @@ To learn more about a subcommand, run `./x.py <subcommand> -h`"
opts.optopt("", "on-fail", "command to run on failure", "CMD");
opts.optflag("", "dry-run", "dry run; don't build anything");
opts.optopt("", "stage",
"stage to build (indicates compiler to use/test, e.g. stage 0 uses the \
"stage to build (indicates compiler to use/test, e.g., stage 0 uses the \
bootstrap compiler, stage 1 the stage 0 rustc artifacts, etc.)",
"N");
opts.optmulti("", "keep-stage", "stage(s) to keep without recompiling \
(pass multiple times to keep e.g. both stages 0 and 1)", "N");
(pass multiple times to keep e.g., both stages 0 and 1)", "N");
opts.optopt("", "src", "path to the root of the rust checkout", "DIR");
opts.optopt("j", "jobs", "number of jobs to run in parallel", "JOBS");
opts.optflag("h", "help", "print this help message");


@ -18,11 +18,11 @@ use std::fs;
use std::path::{Path, PathBuf, Component};
use std::process::Command;
use dist::{self, pkgname, sanitize_sh, tmpdir};
use crate::dist::{self, pkgname, sanitize_sh, tmpdir};
use builder::{Builder, RunConfig, ShouldRun, Step};
use cache::Interned;
use config::Config;
use crate::builder::{Builder, RunConfig, ShouldRun, Step};
use crate::cache::Interned;
use crate::config::Config;
pub fn install_docs(builder: &Builder, stage: u32, host: Interned<String>) {
install_sh(builder, "docs", "rust-docs", stage, Some(host));


@ -10,7 +10,7 @@
//! Job management on Windows for bootstrapping
//!
//! Most of the time when you're running a build system (e.g. make) you expect
//! Most of the time when you're running a build system (e.g., make) you expect
//! Ctrl-C or abnormal termination to actually terminate the entire tree of
//! process in play, not just the one at the top. This currently works "by
//! default" on Unix platforms because Ctrl-C actually sends a signal to the
@ -42,7 +42,7 @@
use std::env;
use std::io;
use std::mem;
use Build;
use crate::Build;
type HANDLE = *mut u8;
type BOOL = i32;
@ -162,11 +162,11 @@ pub unsafe fn setup(build: &mut Build) {
return
}
// If we've got a parent process (e.g. the python script that called us)
// If we've got a parent process (e.g., the python script that called us)
// then move ownership of this job object up to them. That way if the python
// script is killed (e.g. via ctrl-c) then we'll all be torn down.
// script is killed (e.g., via ctrl-c) then we'll all be torn down.
//
// If we don't have a parent (e.g. this was run directly) then we
// If we don't have a parent (e.g., this was run directly) then we
// intentionally leak the job object handle. When our process exits
// (normally or abnormally) it will close the handle implicitly, causing all
// processes in the job to be cleaned up.
@ -184,7 +184,7 @@ pub unsafe fn setup(build: &mut Build) {
// If this failed, well at least we tried! An example of DuplicateHandle
// failing in the past has been when the wrong python2 package spawned this
// build system (e.g. the `python2` package in MSYS instead of
// build system (e.g., the `python2` package in MSYS instead of
// `mingw-w64-x86_64-python2`. Not sure why it failed, but the "failure
// mode" here is that we only clean everything up when the build system
// dies, not when the python parent does, so not too bad.


@ -38,7 +38,7 @@
//! However, compiletest itself tries to avoid running tests when the artifacts
//! that are involved (mainly the compiler) haven't changed.
//!
//! When you execute `x.py build`, the steps which are executed are:
//! When you execute `x.py build`, the steps executed are:
//!
//! * First, the python script is run. This will automatically download the
//! stage0 rustc and cargo according to `src/stage0.txt`, or use the cached
@ -159,7 +159,7 @@ use std::os::windows::fs::symlink_file;
use build_helper::{run_silent, run_suppressed, try_run_silent, try_run_suppressed, output, mtime};
use filetime::FileTime;
use util::{exe, libdir, OutputFolder, CiEnv};
use crate::util::{exe, libdir, OutputFolder, CiEnv};
mod cc_detect;
mod channel;
@ -188,7 +188,7 @@ mod job;
mod job {
use libc;
pub unsafe fn setup(build: &mut ::Build) {
pub unsafe fn setup(build: &mut crate::Build) {
if build.config.low_priority {
libc::setpriority(libc::PRIO_PGRP as _, 0, 10);
}
@ -197,14 +197,14 @@ mod job {
#[cfg(any(target_os = "haiku", not(any(unix, windows))))]
mod job {
pub unsafe fn setup(_build: &mut ::Build) {
pub unsafe fn setup(_build: &mut crate::Build) {
}
}
pub use config::Config;
use flags::Subcommand;
use cache::{Interned, INTERNER};
use toolstate::ToolState;
pub use crate::config::Config;
use crate::flags::Subcommand;
use crate::cache::{Interned, INTERNER};
use crate::toolstate::ToolState;
const LLVM_TOOLS: &[&str] = &[
"llvm-nm", // used to inspect binaries; it shows symbol names, their sizes and visibility
@ -1067,9 +1067,8 @@ impl Build {
/// Returns the `a.b.c` version that the given package is at.
fn release_num(&self, package: &str) -> String {
let mut toml = String::new();
let toml_file_name = self.src.join(&format!("src/tools/{}/Cargo.toml", package));
t!(t!(File::open(toml_file_name)).read_to_string(&mut toml));
let toml = t!(fs::read_to_string(&toml_file_name));
for line in toml.lines() {
let prefix = "version = \"";
let suffix = "\"";
@ -1135,10 +1134,10 @@ impl Build {
let krate = &self.crates[&krate];
if krate.is_local(self) {
ret.push(krate);
for dep in &krate.deps {
if visited.insert(dep) && dep != "build_helper" {
list.push(*dep);
}
}
for dep in &krate.deps {
if visited.insert(dep) && dep != "build_helper" {
list.push(*dep);
}
}
}
@ -1151,8 +1150,7 @@ impl Build {
}
let mut paths = Vec::new();
let mut contents = Vec::new();
t!(t!(File::open(stamp)).read_to_end(&mut contents));
let contents = t!(fs::read(stamp));
// This is the method we use for extracting paths from the stamp file passed to us. See
// run_cargo for more information (in compile.rs).
for part in contents.split(|b| *b == 0) {


@ -16,8 +16,8 @@ use std::collections::HashSet;
use build_helper::output;
use serde_json;
use {Build, Crate};
use cache::INTERNER;
use crate::{Build, Crate};
use crate::cache::INTERNER;
#[derive(Deserialize)]
struct Output {


@ -21,7 +21,6 @@
use std::env;
use std::ffi::OsString;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::process::Command;
@ -29,11 +28,11 @@ use build_helper::output;
use cmake;
use cc;
use util::{self, exe};
use crate::util::{self, exe};
use build_helper::up_to_date;
use builder::{Builder, RunConfig, ShouldRun, Step};
use cache::Interned;
use GitRepo;
use crate::builder::{Builder, RunConfig, ShouldRun, Step};
use crate::cache::Interned;
use crate::GitRepo;
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Llvm {
@ -75,8 +74,7 @@ impl Step for Llvm {
}
let rebuild_trigger = builder.src.join("src/rustllvm/llvm-rebuild-trigger");
let mut rebuild_trigger_contents = String::new();
t!(t!(File::open(&rebuild_trigger)).read_to_string(&mut rebuild_trigger_contents));
let rebuild_trigger_contents = t!(fs::read_to_string(&rebuild_trigger));
let (out_dir, llvm_config_ret_dir) = if emscripten {
let dir = builder.emscripten_llvm_out(target);
@ -93,8 +91,7 @@ impl Step for Llvm {
let build_llvm_config = llvm_config_ret_dir
.join(exe("llvm-config", &*builder.config.build));
if done_stamp.exists() {
let mut done_contents = String::new();
t!(t!(File::open(&done_stamp)).read_to_string(&mut done_contents));
let done_contents = t!(fs::read_to_string(&done_stamp));
// If LLVM was already built previously and contents of the rebuild-trigger file
// didn't change from the previous build, then no action is required.
@ -251,7 +248,7 @@ impl Step for Llvm {
configure_cmake(builder, target, &mut cfg, false);
// FIXME: we don't actually need to build all LLVM tools and all LLVM
// libraries here, e.g. we just want a few components and a few
// libraries here, e.g., we just want a few components and a few
// tools. Figure out how to filter them down and only build the right
// tools and libs on all platforms.
@ -261,7 +258,7 @@ impl Step for Llvm {
cfg.build();
t!(t!(File::create(&done_stamp)).write_all(rebuild_trigger_contents.as_bytes()));
t!(fs::write(&done_stamp, &rebuild_trigger_contents));
build_llvm_config
}
@ -281,11 +278,11 @@ fn check_llvm_version(builder: &Builder, llvm_config: &Path) {
let mut parts = version.split('.').take(2)
.filter_map(|s| s.parse::<u32>().ok());
if let (Some(major), Some(_minor)) = (parts.next(), parts.next()) {
if major >= 5 {
if major >= 6 {
return
}
}
panic!("\n\nbad LLVM version: {}, need >=5.0\n\n", version)
panic!("\n\nbad LLVM version: {}, need >=6.0\n\n", version)
}
fn configure_cmake(builder: &Builder,


@ -21,14 +21,13 @@
use std::collections::HashMap;
use std::env;
use std::ffi::{OsString, OsStr};
use std::fs::{self, File};
use std::io::Read;
use std::fs;
use std::path::PathBuf;
use std::process::Command;
use build_helper::output;
use Build;
use crate::Build;
struct Finder {
cache: HashMap<OsString, Option<PathBuf>>,
@ -235,9 +234,7 @@ $ pacman -R cmake && pacman -S mingw-w64-x86_64-cmake
}
if build.config.channel == "stable" {
let mut stage0 = String::new();
t!(t!(File::open(build.src.join("src/stage0.txt")))
.read_to_string(&mut stage0));
let stage0 = t!(fs::read_to_string(build.src.join("src/stage0.txt")));
if stage0.contains("\ndev:") {
panic!("bootstrapping from a dev compiler in a stable release, but \
should only be bootstrapping from a released compiler!");


@ -16,25 +16,24 @@
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::fs::{self, File};
use std::io::Read;
use std::fs;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::{self, output};
use builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use cache::{Interned, INTERNER};
use compile;
use dist;
use flags::Subcommand;
use native;
use tool::{self, Tool, SourceType};
use toolstate::ToolState;
use util::{self, dylib_path, dylib_path_var};
use Crate as CargoCrate;
use {DocTests, Mode, GitRepo};
use crate::builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step};
use crate::cache::{Interned, INTERNER};
use crate::compile;
use crate::dist;
use crate::flags::Subcommand;
use crate::native;
use crate::tool::{self, Tool, SourceType};
use crate::toolstate::ToolState;
use crate::util::{self, dylib_path, dylib_path_var};
use crate::Crate as CargoCrate;
use crate::{DocTests, Mode, GitRepo};
const ADB_TEST_DIR: &str = "/data/tmp/work";
@ -430,6 +429,45 @@ impl Step for Miri {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CompiletestTest {
stage: u32,
host: Interned<String>,
}
impl Step for CompiletestTest {
type Output = ();
fn should_run(run: ShouldRun) -> ShouldRun {
run.path("src/tools/compiletest")
}
fn make_run(run: RunConfig) {
run.builder.ensure(CompiletestTest {
stage: run.builder.top_stage,
host: run.target,
});
}
/// Runs `cargo test` for compiletest.
fn run(self, builder: &Builder) {
let stage = self.stage;
let host = self.host;
let compiler = builder.compiler(stage, host);
let mut cargo = tool::prepare_tool_cargo(builder,
compiler,
Mode::ToolBootstrap,
host,
"test",
"src/tools/compiletest",
SourceType::InTree,
&[]);
try_run(builder, &mut cargo);
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
stage: u32,
@ -578,7 +616,7 @@ impl Step for RustdocJS {
if let Some(ref nodejs) = builder.config.nodejs {
let mut command = Command::new(nodejs);
command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]);
builder.ensure(::doc::Std {
builder.ensure(crate::doc::Std {
target: self.target,
stage: builder.top_stage,
});
@ -833,12 +871,6 @@ host_test!(RunFailFullDeps {
suite: "run-fail-fulldeps"
});
host_test!(CompileFailFullDeps {
path: "src/test/compile-fail-fulldeps",
mode: "compile-fail",
suite: "compile-fail-fulldeps"
});
host_test!(Rustdoc {
path: "src/test/rustdoc",
mode: "rustdoc",
@ -971,7 +1003,7 @@ impl Step for Compiletest {
}
if builder.no_std(target) == Some(true) {
// for no_std run-make (e.g. thumb*),
// for no_std run-make (e.g., thumb*),
// we need a host compiler which is called by cargo.
builder.ensure(compile::Std { compiler, target: compiler.host });
}
@ -1277,7 +1309,7 @@ impl Step for DocTest {
/// Run `rustdoc --test` for all documentation in `src/doc`.
///
/// This will run all tests in our markdown documentation (e.g. the book)
/// This will run all tests in our markdown documentation (e.g., the book)
/// located in `src/doc`. The `rustdoc` that's run is the one that sits next to
/// `compiler`.
fn run(self, builder: &Builder) {
@ -1427,10 +1459,8 @@ impl Step for ErrorIndex {
}
fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool {
match File::open(markdown) {
Ok(mut file) => {
let mut contents = String::new();
t!(file.read_to_string(&mut contents));
match fs::read_to_string(markdown) {
Ok(contents) => {
if !contents.contains("```") {
return true;
}
@ -1567,10 +1597,7 @@ impl Step for Crate {
let builder = run.builder;
run = run.krate("test");
for krate in run.builder.in_tree_crates("std") {
if krate.is_local(&run.builder)
&& !(krate.name.starts_with("rustc_") && krate.name.ends_with("san"))
&& krate.name != "dlmalloc"
{
if !(krate.name.starts_with("rustc_") && krate.name.ends_with("san")) {
run = run.path(krate.local_path(&builder).to_str().unwrap());
}
}


@ -15,16 +15,16 @@ use std::path::PathBuf;
use std::process::{Command, exit};
use std::collections::HashSet;
use Mode;
use Compiler;
use builder::{Step, RunConfig, ShouldRun, Builder};
use util::{exe, add_lib_path};
use compile;
use native;
use channel::GitInfo;
use channel;
use cache::Interned;
use toolstate::ToolState;
use crate::Mode;
use crate::Compiler;
use crate::builder::{Step, RunConfig, ShouldRun, Builder};
use crate::util::{exe, add_lib_path};
use crate::compile;
use crate::native;
use crate::channel::GitInfo;
use crate::channel;
use crate::cache::Interned;
use crate::toolstate::ToolState;
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub enum SourceType {
@ -646,7 +646,7 @@ impl<'a> Builder<'a> {
self.cargo_out(compiler, tool.get_mode(), *host).join("deps"),
];
// On MSVC a tool may invoke a C compiler (e.g. compiletest in run-make
// On MSVC a tool may invoke a C compiler (e.g., compiletest in run-make
// mode) and that C compiler may need some extra PATH modification. Do
// so here.
if compiler.host.contains("msvc") {


@ -21,8 +21,8 @@ use std::path::{Path, PathBuf};
use std::process::Command;
use std::time::{SystemTime, Instant};
use config::Config;
use builder::Builder;
use crate::config::Config;
use crate::builder::Builder;
/// Returns the `name` as the filename of a static library for `target`.
pub fn staticlib(name: &str, target: &str) -> String {


@ -224,14 +224,12 @@ impl Drop for NativeLibBoilerplate {
// Timestamps are created automatically when the result of `native_lib_boilerplate` goes out
// of scope, so all the build actions should be completed until then.
pub fn native_lib_boilerplate(
src_name: &str,
src_dir: &Path,
out_name: &str,
link_name: &str,
search_subdir: &str,
) -> Result<NativeLibBoilerplate, ()> {
let current_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
let src_dir = current_dir.join("..").join(src_name);
rerun_if_changed_anything_in_dir(&src_dir);
rerun_if_changed_anything_in_dir(src_dir);
let out_dir = env::var_os("RUSTBUILD_NATIVE_DIR").unwrap_or_else(||
env::var_os("OUT_DIR").unwrap());
@ -248,9 +246,9 @@ pub fn native_lib_boilerplate(
);
let timestamp = out_dir.join("rustbuild.timestamp");
if !up_to_date(Path::new("build.rs"), &timestamp) || !up_to_date(&src_dir, &timestamp) {
if !up_to_date(Path::new("build.rs"), &timestamp) || !up_to_date(src_dir, &timestamp) {
Ok(NativeLibBoilerplate {
src_dir: src_dir,
src_dir: src_dir.to_path_buf(),
out_dir: out_dir,
})
} else {
@ -279,8 +277,11 @@ pub fn sanitizer_lib_boilerplate(sanitizer_name: &str)
} else {
format!("static={}", link_name)
};
// The source for `compiler-rt` comes from the `compiler-builtins` crate, so
// load our env var set by cargo to find the source code.
let dir = env::var_os("DEP_COMPILER_RT_COMPILER_RT").unwrap();
let lib = native_lib_boilerplate(
"libcompiler_builtins/compiler-rt",
dir.as_ref(),
sanitizer_name,
&to_link,
search_path,


@ -16,6 +16,7 @@ RUN . /scripts/android-ndk.sh && \
# env
ENV TARGETS=arm-linux-androideabi
ENV TARGETS=$TARGETS,armv7-linux-androideabi
ENV TARGETS=$TARGETS,thumbv7neon-linux-androideabi
ENV TARGETS=$TARGETS,i686-linux-android
ENV TARGETS=$TARGETS,aarch64-linux-android
ENV TARGETS=$TARGETS,x86_64-linux-android
@ -24,6 +25,7 @@ ENV RUST_CONFIGURE_ARGS \
--enable-extended \
--arm-linux-androideabi-ndk=/android/ndk/arm-14 \
--armv7-linux-androideabi-ndk=/android/ndk/arm-14 \
--thumbv7neon-linux-androideabi-ndk=/android/ndk/arm-14 \
--i686-linux-android-ndk=/android/ndk/x86-14 \
--aarch64-linux-android-ndk=/android/ndk/arm64-21 \
--x86_64-linux-android-ndk=/android/ndk/x86_64-21 \


@ -52,8 +52,8 @@ RUN env \
CXX=arm-linux-gnueabi-g++ CXXFLAGS="-march=armv6 -marm" \
bash musl.sh arm && \
env \
CC=arm-linux-gnueabihf-gcc CFLAGS="-march=armv6 -marm" \
CXX=arm-linux-gnueabihf-g++ CXXFLAGS="-march=armv6 -marm" \
CC=arm-linux-gnueabihf-gcc CFLAGS="-march=armv6 -marm -mfpu=vfp" \
CXX=arm-linux-gnueabihf-g++ CXXFLAGS="-march=armv6 -marm -mfpu=vfp" \
bash musl.sh armhf && \
env \
CC=arm-linux-gnueabihf-gcc CFLAGS="-march=armv7-a" \


@ -13,31 +13,28 @@ set -ex
source shared.sh
LLVM=7.0.0
# Currently these commits are all tip-of-tree as of 2018-12-16, used to pick up
# a fix for rust-lang/rust#56849
LLVM=032b00a5404865765cda7db3039f39d54964d8b0
LLD=3e4aa4e8671523321af51449e0569f455ef3ad43
CLANG=a6b9739069763243020f4ea6fe586bc135fde1f9
mkdir clang
cd clang
curl https://releases.llvm.org/$LLVM/llvm-$LLVM.src.tar.xz | \
xz -d | \
tar xf -
cd llvm-$LLVM.src
curl -L https://github.com/llvm-mirror/llvm/archive/$LLVM.tar.gz | \
tar xzf - --strip-components=1
mkdir -p tools/clang
curl https://releases.llvm.org/$LLVM/cfe-$LLVM.src.tar.xz | \
xz -d | \
tar xf - -C tools/clang --strip-components=1
curl -L https://github.com/llvm-mirror/clang/archive/$CLANG.tar.gz | \
tar xzf - --strip-components=1 -C tools/clang
mkdir -p tools/lld
curl -L https://github.com/llvm-mirror/lld/archive/$LLD.tar.gz | \
tar zxf - --strip-components=1 -C tools/lld
curl https://releases.llvm.org/$LLVM/lld-$LLVM.src.tar.xz | \
xz -d | \
tar xf - -C tools/lld --strip-components=1
mkdir ../clang-build
cd ../clang-build
mkdir clang-build
cd clang-build
# For whatever reason the default set of include paths for clang is different
# than that of gcc. As a result we need to manually include our sysroot's
@ -55,7 +52,7 @@ INC="$INC:/rustroot/lib/gcc/x86_64-unknown-linux-gnu/4.8.5/include-fixed"
INC="$INC:/usr/include"
hide_output \
cmake ../llvm-$LLVM.src \
cmake .. \
-DCMAKE_C_COMPILER=/rustroot/bin/gcc \
-DCMAKE_CXX_COMPILER=/rustroot/bin/g++ \
-DCMAKE_BUILD_TYPE=Release \


@ -20,4 +20,5 @@ COPY scripts/sccache.sh /scripts/
RUN sh /scripts/sccache.sh
ENV RUN_CHECK_WITH_PARALLEL_QUERIES 1
ENV SCRIPT python2.7 ../x.py check --target=i686-pc-windows-gnu --host=i686-pc-windows-gnu
ENV SCRIPT python2.7 ../x.py check --target=i686-pc-windows-gnu --host=i686-pc-windows-gnu && \
python2.7 ../x.py build --stage 0 src/tools/build-manifest


@ -11,7 +11,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
cmake \
sudo \
gdb \
llvm-5.0-tools \
llvm-6.0-tools \
libedit-dev \
zlib1g-dev \
xz-utils
@ -22,6 +22,6 @@ RUN sh /scripts/sccache.sh
# using llvm-link-shared due to libffi issues -- see #34486
ENV RUST_CONFIGURE_ARGS \
--build=x86_64-unknown-linux-gnu \
--llvm-root=/usr/lib/llvm-5.0 \
--llvm-root=/usr/lib/llvm-6.0 \
--enable-llvm-link-shared
ENV RUST_CHECK_TARGET check

@ -1 +0,0 @@
Subproject commit c99638dc2ecfc750cc1656f6edb2bd062c1e0981

@ -1 +1 @@
Subproject commit 616fe4172b688393aeee5f34935cc25733c9c062
Subproject commit 74d81d80052cb88925f0e73b12fbd0b73ab7b5a0


@ -22,6 +22,13 @@ This flag lets you append a single extra argument to the linker invocation.
This flag lets you append multiple extra arguments to the linker invocation. The
options should be separated by spaces.
## linker-flavor
This flag lets you control the linker flavor used by `rustc`. If a linker is given with the
`-C linker` flag described above then the linker flavor is inferred from the value provided. If no
linker is given then the linker flavor is used to determine the linker to use. Every `rustc` target
defaults to some linker flavor.
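As a rough sketch, adapted from the removed `-Z linker-flavor` chapter further below (the target triple and linker binary are illustrative, not prescribed by this page), telling `rustc` to drive LLD in its GNU-`ld`-compatible mode could look like:

```bash
$ rustc --target thumbv7m-none-eabi \
    -C linker=ld.lld \
    -C linker-flavor=ld \
    main.rs
```

Here the flavor matches the `ld.lld` binary; as described above, if `-C linker` were omitted the flavor alone would determine which linker gets invoked.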
## link-dead-code
Normally, the linker will remove dead code. This flag disables this behavior.


@ -6,5 +6,5 @@ the team is supporting directly.
To see the list of built-in targets, you can run `rustc --print target-list`,
or look at [the API
docs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_back/target/#modules).
docs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_target/spec/index.html#modules).
Each module there defines a builder for a particular target.


@ -4,7 +4,7 @@
architecture. The list of *targets* are the possible architectures that you can build for.
To see all the options that you can set with a target, see the docs
[here](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_back/target/struct.Target.html).
[here](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_target/spec/struct.Target.html).
To compile to a particular target, use the `--target` flag:
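The concrete command that follows this sentence in the book lies outside this diff hunk; a minimal sketch of the usual invocation, with an arbitrarily chosen target triple, would be:

```bash
$ rustc src/main.rs --target=x86_64-unknown-linux-musl
```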


@ -402,3 +402,18 @@ Using `index-page` option enables `enable-index-page` option as well.
### `--enable-index-page`: generate a default index page for docs
This feature allows the generation of a default index-page which lists the generated crates.
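Following the invocation pattern of the other flags in this chapter, and assuming it is likewise gated behind `-Z unstable-options`, a sketch of its use could be:

```bash
$ rustdoc src/lib.rs -Z unstable-options --enable-index-page
```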
### `--static-root-path`: control how static files are loaded in HTML output
Using this flag looks like this:
```bash
$ rustdoc src/lib.rs -Z unstable-options --static-root-path '/cache/'
```
This flag controls how rustdoc links to its static files on HTML pages. If you're hosting a lot of
crates' docs generated by the same version of rustdoc, you can use this flag to cache rustdoc's CSS,
JavaScript, and font files in a single location, rather than duplicating it once per "doc root"
(grouping of crate docs generated into the same output directory, like with `cargo doc`). Per-crate
files like the search index will still load from the documentation root, but anything that gets
renamed with `--resource-suffix` will load from the given path.
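For instance, a hypothetical setup (the URL and suffix below are invented) that serves the shared CSS, JavaScript, and fonts from one cached location while suffixed per-crate files keep loading from each doc root:

```bash
$ rustdoc src/lib.rs -Z unstable-options \
    --static-root-path 'https://static.example.com/rustdoc/' \
    --resource-suffix='-20181223'
```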


@ -1,61 +0,0 @@
# `linker-flavor`
The tracking issue for this feature is: None
------------------------
Every `rustc` target defaults to some linker. For example, Linux targets default
to gcc. In some cases, you may want to override the default; you can do that
with the unstable CLI argument: `-Z linker-flavor`.
Here how you would use this flag to link a Rust binary for the
`thumbv7m-none-eabi` using LLD instead of GCC.
``` text
$ xargo rustc --target thumbv7m-none-eabi -- \
-C linker=ld.lld \
-Z linker-flavor=ld \
-Z print-link-args | tr ' ' '\n'
"ld.lld"
"-L"
"$SYSROOT/lib/rustlib/thumbv7m-none-eabi/lib"
"$PWD/target/thumbv7m-none-eabi/debug/deps/app-512e9dbf385f233c.0.o"
"-o"
"$PWD/target/thumbv7m-none-eabi/debug/deps/app-512e9dbf385f233c"
"--gc-sections"
"-L"
"$PWD/target/thumbv7m-none-eabi/debug/deps"
"-L"
"$PWD/target/debug/deps"
"-L"
"$SYSROOT/lib/rustlib/thumbv7m-none-eabi/lib"
"-Bstatic"
"$SYSROOT/lib/rustlib/thumbv7m-none-eabi/lib/libcore-e1ccb7dfb1cb9ebb.rlib"
"-Bdynamic"
```
Whereas the default is:
``` text
$ xargo rustc --target thumbv7m-none-eabi -- \
-C link-arg=-nostartfiles \
-Z print-link-args | tr ' ' '\n'
"arm-none-eabi-gcc"
"-L"
"$SYSROOT/lib/rustlib/thumbv7m-none-eabi/lib"
"$PWD/target/thumbv7m-none-eabi/debug/deps/app-961e39416baa38d9.0.o"
"-o"
"$PWD/target/thumbv7m-none-eabi/debug/deps/app-961e39416baa38d9"
"-Wl,--gc-sections"
"-nodefaultlibs"
"-L"
"$PWD/target/thumbv7m-none-eabi/debug/deps"
"-L"
"$PWD/target/debug/deps"
"-L"
"$SYSROOT/lib/rustlib/thumbv7m-none-eabi/lib"
"-Wl,-Bstatic"
"$SYSROOT/lib/rustlib/thumbv7m-none-eabi/lib/libcore-e1ccb7dfb1cb9ebb.rlib"
"-nostartfiles"
"-Wl,-Bdynamic"
```


@ -149,7 +149,7 @@ closure-like semantics. Namely:
* Whenever a generator is dropped it will drop all captured environment
variables.
Note that unlike closures generators at this time cannot take any arguments.
Note that unlike closures, generators at this time cannot take any arguments.
That is, generators must always look like `|| { ... }`. This restriction may be
lifted at a future date, the design is ongoing!


@ -1,8 +0,0 @@
# `repr_packed`
The tracking issue for this feature is [#33158]
[#33158]: https://github.com/rust-lang/rust/issues/33158
------------------------


@ -8,7 +8,7 @@ The tracking issue for this feature is: [#48055]
This implements [RFC1909]. When turned on, you can have unsized arguments and locals:
[RFC1909]: https://github.com/rust-lang/rfcs/blob/master/text/1909-coercions.md
[RFC1909]: https://github.com/rust-lang/rfcs/blob/master/text/1909-unsized-rvalues.md
```rust
#![feature(unsized_locals)]


@ -9,6 +9,7 @@
# except according to those terms.
import gdb
import re
import sys
import debugger_pretty_printers_common as rustpp
@ -20,6 +21,16 @@ if sys.version_info[0] >= 3:
rust_enabled = 'set language rust' in gdb.execute('complete set language ru', to_string = True)
# The btree pretty-printers fail in a confusing way unless
# https://sourceware.org/bugzilla/show_bug.cgi?id=21763 is fixed.
# This fix went in 8.1, so check for that.
# See https://github.com/rust-lang/rust/issues/56730
gdb_81 = False
_match = re.match('([0-9]+)\\.([0-9]+)', gdb.VERSION)
if _match:
if int(_match.group(1)) > 8 or (int(_match.group(1)) == 8 and int(_match.group(2)) >= 1):
gdb_81 = True
#===============================================================================
# GDB Pretty Printing Module for Rust
#===============================================================================
@ -110,10 +121,10 @@ def rust_pretty_printer_lookup_function(gdb_val):
if type_kind == rustpp.TYPE_KIND_STD_VECDEQUE:
return RustStdVecDequePrinter(val)
if type_kind == rustpp.TYPE_KIND_STD_BTREESET:
if type_kind == rustpp.TYPE_KIND_STD_BTREESET and gdb_81:
return RustStdBTreeSetPrinter(val)
if type_kind == rustpp.TYPE_KIND_STD_BTREEMAP:
if type_kind == rustpp.TYPE_KIND_STD_BTREEMAP and gdb_81:
return RustStdBTreeMapPrinter(val)
if type_kind == rustpp.TYPE_KIND_STD_STRING:


@ -741,14 +741,14 @@ fn_anon_params
;
fn_params_with_self
: '(' maybe_mut SELF maybe_ty_ascription maybe_comma_params ')' { $$ = mk_node("SelfValue", 3, $2, $4, $5); }
: '(' maybe_mut SELF maybe_ty_ascription maybe_comma_params ')' { $$ = mk_node("SelfLower", 3, $2, $4, $5); }
| '(' '&' maybe_mut SELF maybe_ty_ascription maybe_comma_params ')' { $$ = mk_node("SelfRegion", 3, $3, $5, $6); }
| '(' '&' lifetime maybe_mut SELF maybe_ty_ascription maybe_comma_params ')' { $$ = mk_node("SelfRegion", 4, $3, $4, $6, $7); }
| '(' maybe_params ')' { $$ = mk_node("SelfStatic", 1, $2); }
;
fn_anon_params_with_self
: '(' maybe_mut SELF maybe_ty_ascription maybe_comma_anon_params ')' { $$ = mk_node("SelfValue", 3, $2, $4, $5); }
: '(' maybe_mut SELF maybe_ty_ascription maybe_comma_anon_params ')' { $$ = mk_node("SelfLower", 3, $2, $4, $5); }
| '(' '&' maybe_mut SELF maybe_ty_ascription maybe_comma_anon_params ')' { $$ = mk_node("SelfRegion", 3, $3, $5, $6); }
| '(' '&' lifetime maybe_mut SELF maybe_ty_ascription maybe_comma_anon_params ')' { $$ = mk_node("SelfRegion", 4, $3, $4, $6, $7); }
| '(' maybe_anon_params ')' { $$ = mk_node("SelfStatic", 1, $2); }


@ -11,10 +11,10 @@ path = "lib.rs"
[dependencies]
core = { path = "../libcore" }
compiler_builtins = { path = "../rustc/compiler_builtins_shim" }
compiler_builtins = { version = "0.1.0", features = ['rustc-dep-of-std'] }
[dev-dependencies]
rand = "0.5"
rand = "0.6"
[[test]]
name = "collectionstests"
@ -28,3 +28,6 @@ path = "../liballoc/benches/lib.rs"
name = "vec_deque_append_bench"
path = "../liballoc/benches/vec_deque_append.rs"
harness = false
[features]
compiler-builtins-mem = ['compiler_builtins/mem']


@ -77,7 +77,9 @@ use core::iter::{Iterator, FromIterator, FusedIterator};
use core::marker::{Unpin, Unsize};
use core::mem;
use core::pin::Pin;
use core::ops::{CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Generator, GeneratorState};
use core::ops::{
CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Receiver, Generator, GeneratorState
};
use core::ptr::{self, NonNull, Unique};
use core::task::{LocalWaker, Poll};
@ -583,6 +585,9 @@ impl<T: ?Sized> DerefMut for Box<T> {
}
}
#[unstable(feature = "receiver_trait", issue = "0")]
impl<T: ?Sized> Receiver for Box<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for Box<I> {
type Item = I::Item;
@ -801,7 +806,7 @@ impl<T: ?Sized> AsMut<T> for Box<T> {
* safe.)
* - It is in practice very useful to have Box<T> be unconditionally
* Unpin because of trait objects, for which the structural auto
* trait functionality does not apply (e.g. Box<dyn Foo> would
* trait functionality does not apply (e.g., Box<dyn Foo> would
* otherwise not be Unpin).
*
* Another type with the same semantics as Box but only a conditional


@ -858,7 +858,7 @@ impl<T: Ord> BinaryHeap<T> {
}
}
/// Hole represents a hole in a slice i.e. an index without valid value
/// Hole represents a hole in a slice i.e., an index without valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.


@ -58,9 +58,34 @@ pub const CAPACITY: usize = 2 * B - 1;
/// these should always be put behind pointers, and specifically behind `BoxedNode` in the owned
/// case.
///
/// We put the metadata first so that its position is the same for every `K` and `V`, in order
/// to statically allocate a single dummy node to avoid allocations. This struct is `repr(C)` to
/// prevent them from being reordered.
/// We have a separate type for the header and rely on it matching the prefix of `LeafNode`, in
/// order to statically allocate a single dummy node to avoid allocations. This struct is
/// `repr(C)` to prevent them from being reordered. `LeafNode` does not just contain a
/// `NodeHeader` because we do not want unnecessary padding between `len` and the keys.
/// Crucially, `NodeHeader` can be safely transmuted to different K and V. (This is exploited
/// by `as_header`.)
/// See `into_key_slice` for an explanation of K2. K2 cannot be safely transmuted around
/// because the size of `NodeHeader` depends on its alignment!
#[repr(C)]
struct NodeHeader<K, V, K2 = ()> {
/// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`.
/// This either points to an actual node or is null.
parent: *const InternalNode<K, V>,
/// This node's index into the parent node's `edges` array.
/// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`.
/// This is only guaranteed to be initialized when `parent` is non-null.
parent_idx: MaybeUninit<u16>,
/// The number of keys and values this node stores.
///
/// This is next to `parent_idx` to encourage the compiler to join `len` and
/// `parent_idx` into the same 32-bit word, reducing space overhead.
len: u16,
/// See `into_key_slice`.
keys_start: [K2; 0],
}
#[repr(C)]
struct LeafNode<K, V> {
/// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`.
@ -98,24 +123,25 @@ impl<K, V> LeafNode<K, V> {
len: 0
}
}
}
impl<K, V> NodeHeader<K, V> {
fn is_shared_root(&self) -> bool {
ptr::eq(self, &EMPTY_ROOT_NODE as *const _ as *const _)
}
}
// We need to implement Sync here in order to make a static instance.
unsafe impl Sync for LeafNode<(), ()> {}
unsafe impl Sync for NodeHeader<(), ()> {}
// An empty node used as a placeholder for the root node, to avoid allocations.
// We use () in order to save space, since no operation on an empty tree will
// We use just a header in order to save space, since no operation on an empty tree will
// ever take a pointer past the first key.
static EMPTY_ROOT_NODE: LeafNode<(), ()> = LeafNode {
static EMPTY_ROOT_NODE: NodeHeader<(), ()> = NodeHeader {
parent: ptr::null(),
parent_idx: MaybeUninit::uninitialized(),
len: 0,
keys: MaybeUninit::uninitialized(),
vals: MaybeUninit::uninitialized(),
keys_start: [],
};
/// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden
@ -281,7 +307,7 @@ impl<K, V> Root<K, V> {
.node)
};
self.height -= 1;
self.as_mut().as_leaf_mut().parent = ptr::null();
unsafe { (*self.as_mut().as_leaf_mut()).parent = ptr::null(); }
unsafe {
Global.dealloc(NonNull::from(top).cast(), Layout::new::<InternalNode<K, V>>());
@ -306,6 +332,11 @@ impl<K, V> Root<K, V> {
/// `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the
/// `NodeRef` points to an internal node, and when this is `LeafOrInternal` the
/// `NodeRef` could be pointing to either type of node.
/// Note that in case of a leaf node, this might still be the shared root! Only turn
/// this into a `LeafNode` reference if you know it is not a root! Shared references
/// must be dereferencable *for the entire size of their pointee*, so `&InternalNode`
/// pointing to the shared root is UB.
/// Turning this into a `NodeHeader` is always safe.
pub struct NodeRef<BorrowType, K, V, Type> {
height: usize,
node: NonNull<LeafNode<K, V>>,
@ -352,7 +383,7 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
/// Finds the length of the node. This is the number of keys or values. In an
/// internal node, the number of edges is `len() + 1`.
pub fn len(&self) -> usize {
self.as_leaf().len as usize
self.as_header().len as usize
}
/// Returns the height of this node in the whole tree. Zero height denotes the
@ -382,14 +413,19 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
}
}
fn as_leaf(&self) -> &LeafNode<K, V> {
/// Assert that this is indeed a proper leaf node, and not the shared root.
unsafe fn as_leaf(&self) -> &LeafNode<K, V> {
self.node.as_ref()
}
fn as_header(&self) -> &NodeHeader<K, V> {
unsafe {
self.node.as_ref()
&*(self.node.as_ptr() as *const NodeHeader<K, V>)
}
}
pub fn is_shared_root(&self) -> bool {
self.as_leaf().is_shared_root()
self.as_header().is_shared_root()
}
pub fn keys(&self) -> &[K] {
@ -418,7 +454,7 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
>,
Self
> {
let parent_as_leaf = self.as_leaf().parent as *const LeafNode<K, V>;
let parent_as_leaf = self.as_header().parent as *const LeafNode<K, V>;
if let Some(non_zero) = NonNull::new(parent_as_leaf as *mut _) {
Ok(Handle {
node: NodeRef {
@ -427,7 +463,7 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
root: self.root,
_marker: PhantomData
},
idx: unsafe { usize::from(*self.as_leaf().parent_idx.get_ref()) },
idx: unsafe { usize::from(*self.as_header().parent_idx.get_ref()) },
_marker: PhantomData
})
} else {
@ -534,10 +570,10 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
}
}
fn as_leaf_mut(&mut self) -> &mut LeafNode<K, V> {
unsafe {
self.node.as_mut()
}
/// Returns a raw ptr to avoid asserting exclusive access to the entire node.
fn as_leaf_mut(&mut self) -> *mut LeafNode<K, V> {
// We are mutable, so we cannot be the root, so accessing this as a leaf is okay.
self.node.as_ptr()
}
fn keys_mut(&mut self) -> &mut [K] {
@ -551,28 +587,50 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> {
fn into_key_slice(self) -> &'a [K] {
// When taking a pointer to the keys, if our key has a stricter
// alignment requirement than the shared root does, then the pointer
// would be out of bounds, which LLVM assumes will not happen. If the
// alignment is more strict, we need to make an empty slice that doesn't
// use an out of bounds pointer.
// We have to be careful here because we might be pointing to the shared root.
// In that case, we must not create an `&LeafNode`. We could just return
// an empty slice whenever the length is 0 (this includes the shared root),
// but we want to avoid that run-time check.
// Instead, we create a slice pointing into the node whenever possible.
// We can sometimes do this even for the shared root, as the slice will be
// empty. We cannot *always* do this because if the type is too highly
// aligned, the offset of `keys` in a "full node" might be outside the bounds
// of the header! So we do an alignment check first, that will be
// evaluated at compile-time, and only do any run-time check in the rare case
// that the alignment is very big.
if mem::align_of::<K>() > mem::align_of::<LeafNode<(), ()>>() && self.is_shared_root() {
&[]
} else {
// Here either it's not the root, or the alignment is less strict,
// in which case the keys pointer will point "one-past-the-end" of
// the node, which is allowed by LLVM.
// Thanks to the alignment check above, we know that `keys` will be
// in-bounds of some allocation even if this is the shared root!
// (We might be one-past-the-end, but that is allowed by LLVM.)
// Getting the pointer is tricky though. `NodeHeader` does not have a `keys`
// field because we want its size to not depend on the alignment of `K`
// (needed because `as_header` should be safe). We cannot call `as_leaf`
// because we might be the shared root.
// For this reason, `NodeHeader` has this `K2` parameter (that's usually `()`
// and hence just adds a size-0-align-1 field, not affecting layout).
// We know that we can transmute `NodeHeader<K, V, ()>` to `NodeHeader<K, V, K>`
// because we did the alignment check above, and hence `NodeHeader<K, V, K>`
// is not bigger than `NodeHeader<K, V, ()>`! Then we can use `NodeHeader<K, V, K>`
// to compute the pointer where the keys start.
// This entire hack will become unnecessary once
// <https://github.com/rust-lang/rfcs/pull/2582> lands, then we can just take a raw
// pointer to the `keys` field of `*const InternalNode<K, V>`.
// This is a non-debug-assert because it can be completely compile-time evaluated.
assert!(mem::size_of::<NodeHeader<K, V>>() == mem::size_of::<NodeHeader<K, V, K>>());
let header = self.as_header() as *const _ as *const NodeHeader<K, V, K>;
let keys = unsafe { &(*header).keys_start as *const _ as *const K };
unsafe {
slice::from_raw_parts(
self.as_leaf().keys.as_ptr() as *const K,
self.len()
)
slice::from_raw_parts(keys, self.len())
}
}
}
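The alignment comparison in the hunk above is resolved entirely at compile time. A standalone sketch (with a hypothetical header type standing in for `LeafNode<(), ()>`) of why the branch folds away for ordinary key types:

use std::mem;

#[allow(dead_code)]
#[repr(C)]
struct Header {
    parent: *const (),
    parent_idx: u16,
    len: u16,
}

fn key_needs_runtime_check<K>() -> bool {
    // Both operands are constants for any concrete `K`, so this folds away.
    mem::align_of::<K>() > mem::align_of::<Header>()
}

fn main() {
    // Typical keys are no more aligned than the pointer-sized `parent` field,
    // so the shared-root branch disappears entirely.
    assert!(!key_needs_runtime_check::<usize>());
    assert!(!key_needs_runtime_check::<String>());

    // Only an unusually aligned key type keeps the run-time check.
    #[repr(align(64))]
    struct BigAlign([u8; 64]);
    assert!(key_needs_runtime_check::<BigAlign>());
}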
fn into_val_slice(self) -> &'a [V] {
debug_assert!(!self.is_shared_root());
// We cannot be the root, so `as_leaf` is okay
unsafe {
slice::from_raw_parts(
self.as_leaf().vals.as_ptr() as *const V,
@ -602,7 +660,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
} else {
unsafe {
slice::from_raw_parts_mut(
self.as_leaf_mut().keys.as_mut_ptr() as *mut K,
(*self.as_leaf_mut()).keys.as_mut_ptr() as *mut K,
self.len()
)
}
@ -613,7 +671,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
debug_assert!(!self.is_shared_root());
unsafe {
slice::from_raw_parts_mut(
self.as_leaf_mut().vals.as_mut_ptr() as *mut V,
(*self.as_leaf_mut()).vals.as_mut_ptr() as *mut V,
self.len()
)
}
@ -637,9 +695,9 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
unsafe {
ptr::write(self.keys_mut().get_unchecked_mut(idx), key);
ptr::write(self.vals_mut().get_unchecked_mut(idx), val);
}
self.as_leaf_mut().len += 1;
(*self.as_leaf_mut()).len += 1;
}
}
/// Adds a key/value pair to the beginning of the node.
@ -651,9 +709,9 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
unsafe {
slice_insert(self.keys_mut(), 0, key);
slice_insert(self.vals_mut(), 0, val);
}
self.as_leaf_mut().len += 1;
(*self.as_leaf_mut()).len += 1;
}
}
}
@ -672,7 +730,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
ptr::write(self.vals_mut().get_unchecked_mut(idx), val);
ptr::write(self.as_internal_mut().edges.get_unchecked_mut(idx + 1), edge.node);
self.as_leaf_mut().len += 1;
(*self.as_leaf_mut()).len += 1;
Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link();
}
@ -708,7 +766,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
edge.node
);
self.as_leaf_mut().len += 1;
(*self.as_leaf_mut()).len += 1;
self.correct_all_childrens_parent_links();
}
@ -732,12 +790,12 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
ForceResult::Internal(internal) => {
let edge = ptr::read(internal.as_internal().edges.get_unchecked(idx + 1));
let mut new_root = Root { node: edge, height: internal.height - 1 };
new_root.as_mut().as_leaf_mut().parent = ptr::null();
(*new_root.as_mut().as_leaf_mut()).parent = ptr::null();
Some(new_root)
}
};
self.as_leaf_mut().len -= 1;
(*self.as_leaf_mut()).len -= 1;
(key, val, edge)
}
}
@ -765,7 +823,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
);
let mut new_root = Root { node: edge, height: internal.height - 1 };
new_root.as_mut().as_leaf_mut().parent = ptr::null();
(*new_root.as_mut().as_leaf_mut()).parent = ptr::null();
for i in 0..old_len {
Handle::new_edge(internal.reborrow_mut(), i).correct_parent_link();
@ -775,7 +833,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
}
};
self.as_leaf_mut().len -= 1;
(*self.as_leaf_mut()).len -= 1;
(key, val, edge)
}
@ -966,7 +1024,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge
slice_insert(self.node.keys_mut(), self.idx, key);
slice_insert(self.node.vals_mut(), self.idx, val);
self.node.as_leaf_mut().len += 1;
(*self.node.as_leaf_mut()).len += 1;
self.node.vals_mut().get_unchecked_mut(self.idx)
}
@ -1009,8 +1067,10 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
let idx = self.idx as u16;
let ptr = self.node.as_internal_mut() as *mut _;
let mut child = self.descend();
child.as_leaf_mut().parent = ptr;
child.as_leaf_mut().parent_idx.set(idx);
unsafe {
(*child.as_leaf_mut()).parent = ptr;
(*child.as_leaf_mut()).parent_idx.set(idx);
}
}
/// Unsafely asserts to the compiler some static information about whether the underlying
@ -1158,7 +1218,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV>
new_len
);
self.node.as_leaf_mut().len = self.idx as u16;
(*self.node.as_leaf_mut()).len = self.idx as u16;
new_node.len = new_len as u16;
(
@ -1180,7 +1240,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV>
unsafe {
let k = slice_remove(self.node.keys_mut(), self.idx);
let v = slice_remove(self.node.vals_mut(), self.idx);
self.node.as_leaf_mut().len -= 1;
(*self.node.as_leaf_mut()).len -= 1;
(self.left_edge(), k, v)
}
}
@ -1221,7 +1281,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
new_len + 1
);
self.node.as_leaf_mut().len = self.idx as u16;
(*self.node.as_leaf_mut()).len = self.idx as u16;
new_node.data.len = new_len as u16;
let mut new_root = Root {
@ -1295,9 +1355,9 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
for i in self.idx+1..self.node.len() {
Handle::new_edge(self.node.reborrow_mut(), i).correct_parent_link();
}
self.node.as_leaf_mut().len -= 1;
(*self.node.as_leaf_mut()).len -= 1;
left_node.as_leaf_mut().len += right_len as u16 + 1;
(*left_node.as_leaf_mut()).len += right_len as u16 + 1;
if self.node.height > 1 {
ptr::copy_nonoverlapping(
@ -1407,8 +1467,8 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
move_kv(left_kv, new_left_len, parent_kv, 0, 1);
}
left_node.reborrow_mut().as_leaf_mut().len -= count as u16;
right_node.reborrow_mut().as_leaf_mut().len += count as u16;
(*left_node.reborrow_mut().as_leaf_mut()).len -= count as u16;
(*right_node.reborrow_mut().as_leaf_mut()).len += count as u16;
match (left_node.force(), right_node.force()) {
(ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
@ -1468,8 +1528,8 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
new_right_len);
}
left_node.reborrow_mut().as_leaf_mut().len += count as u16;
right_node.reborrow_mut().as_leaf_mut().len -= count as u16;
(*left_node.reborrow_mut().as_leaf_mut()).len += count as u16;
(*right_node.reborrow_mut().as_leaf_mut()).len -= count as u16;
match (left_node.force(), right_node.force()) {
(ForceResult::Internal(left), ForceResult::Internal(mut right)) => {
@ -1560,8 +1620,8 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, ma
move_kv(left_kv, left_new_len, right_kv, 0, right_new_len);
left_node.reborrow_mut().as_leaf_mut().len = left_new_len as u16;
right_node.reborrow_mut().as_leaf_mut().len = right_new_len as u16;
(*left_node.reborrow_mut().as_leaf_mut()).len = left_new_len as u16;
(*right_node.reborrow_mut().as_leaf_mut()).len = right_new_len as u16;
match (left_node.force(), right_node.force()) {
(ForceResult::Internal(left), ForceResult::Internal(right)) => {

View file

@ -258,7 +258,7 @@ impl<T: Ord> BTreeSet<T> {
}
/// Visits the values representing the difference,
/// i.e. the values that are in `self` but not in `other`,
/// i.e., the values that are in `self` but not in `other`,
/// in ascending order.
///
/// # Examples
@ -286,7 +286,7 @@ impl<T: Ord> BTreeSet<T> {
}
/// Visits the values representing the symmetric difference,
/// i.e. the values that are in `self` or in `other` but not in both,
/// i.e., the values that are in `self` or in `other` but not in both,
/// in ascending order.
///
/// # Examples
@ -316,7 +316,7 @@ impl<T: Ord> BTreeSet<T> {
}
/// Visits the values representing the intersection,
/// i.e. the values that are both in `self` and `other`,
/// i.e., the values that are both in `self` and `other`,
/// in ascending order.
///
/// # Examples
@ -344,7 +344,7 @@ impl<T: Ord> BTreeSet<T> {
}
/// Visits the values representing the union,
/// i.e. all the values in `self` or `other`, without duplicates,
/// i.e., all the values in `self` or `other`, without duplicates,
/// in ascending order.
///
/// # Examples
@ -455,7 +455,7 @@ impl<T: Ord> BTreeSet<T> {
}
/// Returns `true` if the set is a subset of another,
/// i.e. `other` contains at least all the values in `self`.
/// i.e., `other` contains at least all the values in `self`.
///
/// # Examples
///
@ -498,7 +498,7 @@ impl<T: Ord> BTreeSet<T> {
}
/// Returns `true` if the set is a superset of another,
/// i.e. `self` contains at least all the values in `other`.
/// i.e., `self` contains at least all the values in `other`.
///
/// # Examples
///

View file

@ -627,7 +627,9 @@ impl<T> LinkedList<T> {
self.pop_front_node().map(Node::into_element)
}
/// Appends an element to the back of a list
/// Appends an element to the back of a list.
///
/// This operation should compute in O(1) time.
///
/// # Examples
///
@ -647,6 +649,8 @@ impl<T> LinkedList<T> {
/// Removes the last element from a list and returns it, or `None` if
/// it is empty.
///
/// This operation should compute in O(1) time.
///
/// # Examples
///
/// ```

View file

@ -1026,7 +1026,10 @@ impl<T> VecDeque<T> {
iter: Iter {
tail: drain_tail,
head: drain_head,
ring: unsafe { self.buffer_as_mut_slice() },
// Crucially, we only create shared references from `self` here and read from
// it. We do not write to `self` nor reborrow to a mutable reference.
// Hence the raw pointer we created above, for `deque`, remains valid.
ring: unsafe { self.buffer_as_slice() },
},
}
}
@ -1894,8 +1897,6 @@ impl<T> VecDeque<T> {
/// # Examples
///
/// ```
/// #![feature(vec_resize_with)]
///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
@ -1914,7 +1915,7 @@ impl<T> VecDeque<T> {
/// buf.resize_with(5, || { state += 1; state });
/// assert_eq!(buf, [5, 10, 101, 102, 103]);
/// ```
#[unstable(feature = "vec_resize_with", issue = "41758")]
#[stable(feature = "vec_resize_with", since = "1.33.0")]
pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut()->T) {
let len = self.len();
@ -1924,6 +1925,118 @@ impl<T> VecDeque<T> {
self.truncate(new_len);
}
}
/// Rotates the double-ended queue `mid` places to the left.
///
/// Equivalently,
/// - Rotates item `mid` into the first position.
/// - Pops the first `mid` items and pushes them to the end.
/// - Rotates `len() - mid` places to the right.
///
/// # Panics
///
/// If `mid` is greater than `len()`. Note that `mid == len()`
/// does _not_ panic and is a no-op rotation.
///
/// # Complexity
///
/// Takes `O(min(mid, len() - mid))` time and no extra space.
///
/// # Examples
///
/// ```
/// #![feature(vecdeque_rotate)]
///
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = (0..10).collect();
///
/// buf.rotate_left(3);
/// assert_eq!(buf, [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]);
///
/// for i in 1..10 {
/// assert_eq!(i * 3 % 10, buf[0]);
/// buf.rotate_left(3);
/// }
/// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// ```
#[unstable(feature = "vecdeque_rotate", issue = "56686")]
pub fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
let k = self.len() - mid;
if mid <= k {
unsafe { self.rotate_left_inner(mid) }
} else {
unsafe { self.rotate_right_inner(k) }
}
}
/// Rotates the double-ended queue `k` places to the right.
///
/// Equivalently,
/// - Rotates the first item into position `k`.
/// - Pops the last `k` items and pushes them to the front.
/// - Rotates `len() - k` places to the left.
///
/// # Panics
///
/// If `k` is greater than `len()`. Note that `k == len()`
/// does _not_ panic and is a no-op rotation.
///
/// # Complexity
///
/// Takes `O(min(k, len() - k))` time and no extra space.
///
/// # Examples
///
/// ```
/// #![feature(vecdeque_rotate)]
///
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = (0..10).collect();
///
/// buf.rotate_right(3);
/// assert_eq!(buf, [7, 8, 9, 0, 1, 2, 3, 4, 5, 6]);
///
/// for i in 1..10 {
/// assert_eq!(0, buf[i * 3 % 10]);
/// buf.rotate_right(3);
/// }
/// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// ```
#[unstable(feature = "vecdeque_rotate", issue = "56686")]
pub fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
let mid = self.len() - k;
if k <= mid {
unsafe { self.rotate_right_inner(k) }
} else {
unsafe { self.rotate_left_inner(mid) }
}
}
// Safety: the following two methods require that the rotation amount
// be less than half the length of the deque.
//
// `wrap_copy` requires that `min(x, cap() - x) + copy_len <= cap()`,
// but that `min` is never more than half the capacity, regardless of `x`,
// so it's sound to call here because we're calling with something
// less than half the length, which is never above half the capacity.
unsafe fn rotate_left_inner(&mut self, mid: usize) {
debug_assert!(mid * 2 <= self.len());
self.wrap_copy(self.head, self.tail, mid);
self.head = self.wrap_add(self.head, mid);
self.tail = self.wrap_add(self.tail, mid);
}
unsafe fn rotate_right_inner(&mut self, k: usize) {
debug_assert!(k * 2 <= self.len());
self.head = self.wrap_sub(self.head, k);
self.tail = self.wrap_sub(self.tail, k);
self.wrap_copy(self.tail, self.head, k);
}
}
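A standalone cross-check (not part of the patch) of the equivalence the new docs state, assuming a nightly toolchain with the `vecdeque_rotate` feature enabled, as in the doc examples above:

#![feature(vecdeque_rotate)]
// Rotating left by `mid` matches popping `mid` items off the front and
// pushing them onto the back, but without the per-element method calls.
use std::collections::VecDeque;

fn rotate_left_by_hand<T>(buf: &mut VecDeque<T>, mid: usize) {
    for _ in 0..mid {
        let item = buf.pop_front().expect("mid must be <= len");
        buf.push_back(item);
    }
}

fn main() {
    let mut fast: VecDeque<_> = (0..10).collect();
    let mut slow = fast.clone();

    fast.rotate_left(3); // O(min(mid, len - mid)) element copies
    rotate_left_by_hand(&mut slow, 3);

    assert_eq!(fast, slow);
    assert_eq!(fast, [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]);
}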
impl<T: Clone> VecDeque<T> {
@ -2795,7 +2908,7 @@ mod tests {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for to_remove in 0..len + 1 {
for to_remove in 0..=len {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
@ -2821,10 +2934,10 @@ mod tests {
let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
let cap = tester.capacity();
for len in 0..cap + 1 {
for tail in 0..cap + 1 {
for drain_start in 0..len + 1 {
for drain_end in drain_start..len + 1 {
for len in 0..=cap {
for tail in 0..=cap {
for drain_start in 0..=len {
for drain_end in drain_start..=len {
tester.tail = tail;
tester.head = tail;
for i in 0..len {
@ -2866,10 +2979,10 @@ mod tests {
tester.reserve(63);
let max_cap = tester.capacity();
for len in 0..cap + 1 {
for len in 0..=cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..max_cap + 1 {
for tail_pos in 0..=max_cap {
tester.tail = tail_pos;
tester.head = tail_pos;
tester.reserve(63);
@ -2899,7 +3012,7 @@ mod tests {
// len is the length *before* splitting
for len in 0..cap {
// index to split at
for at in 0..len + 1 {
for at in 0..=len {
// 0, 1, 2, .., at - 1 (may be empty)
let expected_self = (0..).take(at).collect::<VecDeque<_>>();
// at, at + 1, .., len - 1 (may be empty)
@ -2927,7 +3040,7 @@ mod tests {
fn test_from_vec() {
use vec::Vec;
for cap in 0..35 {
for len in 0..cap + 1 {
for len in 0..=cap {
let mut vec = Vec::with_capacity(cap);
vec.extend(0..len);

View file

@ -72,6 +72,8 @@
test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))]
#![no_std]
#![needs_allocator]
#![deny(intra_doc_link_resolution_failure)]
#![deny(missing_debug_implementations)]
#![cfg_attr(not(test), feature(fn_traits))]
@ -104,6 +106,7 @@
#![feature(ptr_internals)]
#![feature(ptr_offset_from)]
#![feature(rustc_attrs)]
#![feature(receiver_trait)]
#![feature(specialization)]
#![feature(split_ascii_whitespace)]
#![feature(staged_api)]

View file

@ -739,7 +739,7 @@ unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> {
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space. e.g. PAE or x32
// all 4GB in user-space. e.g., PAE or x32
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {

View file

@ -253,7 +253,7 @@ use core::intrinsics::abort;
use core::marker;
use core::marker::{Unpin, Unsize, PhantomData};
use core::mem::{self, align_of_val, forget, size_of_val};
use core::ops::Deref;
use core::ops::{Deref, Receiver};
use core::ops::{CoerceUnsized, DispatchFromDyn};
use core::pin::Pin;
use core::ptr::{self, NonNull};
@ -276,7 +276,7 @@ struct RcBox<T: ?Sized> {
/// See the [module-level documentation](./index.html) for more details.
///
/// The inherent methods of `Rc` are all associated functions, which means
/// that you have to call them as e.g. [`Rc::get_mut(&mut value)`][get_mut] instead of
/// that you have to call them as e.g., [`Rc::get_mut(&mut value)`][get_mut] instead of
/// `value.get_mut()`. This avoids conflicts with methods of the inner
/// type `T`.
///
@ -813,6 +813,9 @@ impl<T: ?Sized> Deref for Rc<T> {
}
}
#[unstable(feature = "receiver_trait", issue = "0")]
impl<T: ?Sized> Receiver for Rc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
/// Drops the `Rc`.
@ -840,6 +843,8 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
/// drop(foo); // Doesn't print anything
/// drop(foo2); // Prints "dropped!"
/// ```
///
/// [`Weak`]: ../../std/rc/struct.Weak.html
fn drop(&mut self) {
unsafe {
self.dec_strong();
@ -900,12 +905,47 @@ impl<T: Default> Default for Rc<T> {
}
}
#[stable(feature = "rust1", since = "1.0.0")]
trait RcEqIdent<T: ?Sized + PartialEq> {
fn eq(&self, other: &Rc<T>) -> bool;
fn ne(&self, other: &Rc<T>) -> bool;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
#[inline]
default fn eq(&self, other: &Rc<T>) -> bool {
**self == **other
}
#[inline]
default fn ne(&self, other: &Rc<T>) -> bool {
**self != **other
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> RcEqIdent<T> for Rc<T> {
#[inline]
fn eq(&self, other: &Rc<T>) -> bool {
Rc::ptr_eq(self, other) || **self == **other
}
#[inline]
fn ne(&self, other: &Rc<T>) -> bool {
!Rc::ptr_eq(self, other) && **self != **other
}
}
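A condensed standalone illustration of what this `Eq` specialization buys, mirroring the new `eq` test added near the bottom of this change; the final assertion only holds on a toolchain that includes this patch:

use std::cell::Cell;
use std::rc::Rc;

#[derive(Eq)]
struct Counted(Cell<u32>);

impl PartialEq for Counted {
    fn eq(&self, other: &Counted) -> bool {
        self.0.set(self.0.get() + 1);
        other.0.set(other.0.get() + 1);
        true
    }
}

fn main() {
    let x = Rc::new(Counted(Cell::new(0)));
    let y = Rc::clone(&x);     // same allocation
    assert!(x == y);           // `Rc::ptr_eq` short-circuits for `T: Eq`
    assert_eq!(x.0.get(), 0);  // the inner `eq` above never ran
}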
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
/// Equality for two `Rc`s.
///
/// Two `Rc`s are equal if their inner values are equal.
///
/// If `T` also implements `Eq`, two `Rc`s that point to the same value are
/// always equal.
///
/// # Examples
///
/// ```
@ -915,15 +955,18 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
///
/// assert!(five == Rc::new(5));
/// ```
#[inline(always)]
#[inline]
fn eq(&self, other: &Rc<T>) -> bool {
**self == **other
RcEqIdent::eq(self, other)
}
/// Inequality for two `Rc`s.
///
/// Two `Rc`s are unequal if their inner values are unequal.
///
/// If `T` also implements `Eq`, two `Rc`s that point to the same value are
/// never unequal.
///
/// # Examples
///
/// ```
@ -933,9 +976,9 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
///
/// assert!(five != Rc::new(6));
/// ```
#[inline(always)]
#[inline]
fn ne(&self, other: &Rc<T>) -> bool {
**self != **other
RcEqIdent::ne(self, other)
}
}
@ -1187,8 +1230,9 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
impl<T> Weak<T> {
/// Constructs a new `Weak<T>`, without allocating any memory.
/// Calling [`upgrade`][Weak::upgrade] on the return value always gives [`None`].
/// Calling [`upgrade`] on the return value always gives [`None`].
///
/// [`upgrade`]: #method.upgrade
/// [`None`]: ../../std/option/enum.Option.html
///
/// # Examples
@ -1251,7 +1295,7 @@ impl<T: ?Sized> Weak<T> {
}
/// Return `None` when the pointer is dangling and there is no allocated `RcBox`,
/// i.e. this `Weak` was created by `Weak::new`
/// i.e., this `Weak` was created by `Weak::new`
#[inline]
fn inner(&self) -> Option<&RcBox<T>> {
if is_dangling(self.ptr) {
@ -1260,6 +1304,52 @@ impl<T: ?Sized> Weak<T> {
Some(unsafe { self.ptr.as_ref() })
}
}
/// Returns true if the two `Weak`s point to the same value (not just values
/// that compare as equal).
///
/// # Notes
///
/// Since this compares pointers it means that two `Weak`s created by
/// `Weak::new()` will compare equal to each other, even though they don't
/// point to any value.
///
/// # Examples
///
/// ```
/// #![feature(weak_ptr_eq)]
/// use std::rc::{Rc, Weak};
///
/// let first_rc = Rc::new(5);
/// let first = Rc::downgrade(&first_rc);
/// let second = Rc::downgrade(&first_rc);
///
/// assert!(Weak::ptr_eq(&first, &second));
///
/// let third_rc = Rc::new(5);
/// let third = Rc::downgrade(&third_rc);
///
/// assert!(!Weak::ptr_eq(&first, &third));
/// ```
///
/// Comparing `Weak::new`.
///
/// ```
/// #![feature(weak_ptr_eq)]
/// use std::rc::{Rc, Weak};
///
/// let first = Weak::new();
/// let second = Weak::new();
/// assert!(Weak::ptr_eq(&first, &second));
///
/// let third_rc = Rc::new(());
/// let third = Rc::downgrade(&third_rc);
/// assert!(!Weak::ptr_eq(&first, &third));
/// ```
#[inline]
#[unstable(feature = "weak_ptr_eq", issue = "55981")]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr.as_ptr() == other.ptr.as_ptr()
}
}
#[stable(feature = "rc_weak", since = "1.4.0")]
@ -1334,9 +1424,10 @@ impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
/// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
/// it. Calling [`upgrade`][Weak::upgrade] on the return value always gives [`None`].
/// it. Calling [`upgrade`] on the return value always gives [`None`].
///
/// [`None`]: ../../std/option/enum.Option.html
/// [`upgrade`]: ../../std/rc/struct.Weak.html#method.upgrade
///
/// # Examples
///

View file

@ -177,7 +177,7 @@ mod hack {
impl<T> [T] {
/// Sorts the slice.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
/// This sort is stable (i.e., does not reorder equal elements) and `O(n log n)` worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
@ -211,7 +211,7 @@ impl<T> [T] {
/// Sorts the slice with a comparator function.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
/// This sort is stable (i.e., does not reorder equal elements) and `O(n log n)` worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
@ -264,7 +264,7 @@ impl<T> [T] {
/// Sorts the slice with a key extraction function.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(m n log(m n))`
/// This sort is stable (i.e., does not reorder equal elements) and `O(m n log(m n))`
/// worst-case, where the key function is `O(m)`.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
@ -301,10 +301,10 @@ impl<T> [T] {
///
/// During sorting, the key function is called only once per element.
///
/// This sort is stable (i.e. does not reorder equal elements) and `O(m n + n log n)`
/// This sort is stable (i.e., does not reorder equal elements) and `O(m n + n log n)`
/// worst-case, where the key function is `O(m)`.
///
/// For simple key functions (e.g. functions that are property accesses or
/// For simple key functions (e.g., functions that are property accesses or
/// basic operations), [`sort_by_key`](#method.sort_by_key) is likely to be
/// faster.
///
@ -589,7 +589,7 @@ impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] {
type Output = Vec<T>;
fn concat(&self) -> Vec<T> {
let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
let size = self.iter().map(|slice| slice.borrow().len()).sum();
let mut result = Vec::with_capacity(size);
for v in self {
result.extend_from_slice(v.borrow())
@ -603,8 +603,8 @@ impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] {
Some(first) => first,
None => return vec![],
};
let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
let mut result = Vec::with_capacity(size + self.len());
let size = self.iter().map(|slice| slice.borrow().len()).sum::<usize>() + self.len() - 1;
let mut result = Vec::with_capacity(size);
result.extend_from_slice(first.borrow());
for v in iter {

View file

@ -577,7 +577,7 @@ impl String {
return Cow::Borrowed("");
};
const REPLACEMENT: &'static str = "\u{FFFD}";
const REPLACEMENT: &str = "\u{FFFD}";
let mut res = String::with_capacity(v.len());
res.push_str(first_valid);
@ -1732,18 +1732,37 @@ impl<'a> FromIterator<&'a str> for String {
#[stable(feature = "extend_string", since = "1.4.0")]
impl FromIterator<String> for String {
fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
let mut buf = String::new();
buf.extend(iter);
buf
let mut iterator = iter.into_iter();
// Because we're iterating over `String`s, we can avoid at least
// one allocation by getting the first string from the iterator
// and appending to it all the subsequent strings.
match iterator.next() {
None => String::new(),
Some(mut buf) => {
buf.extend(iterator);
buf
}
}
}
}
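A standalone sketch of the buffer reuse the comment above describes; the pointer assertion at the end is only expected to hold on a toolchain that includes this change:

fn main() {
    let mut first = String::with_capacity(64);
    first.push_str("hello");
    let ptr_before = first.as_ptr();

    let parts = vec![first, String::from(", "), String::from("world")];
    let joined: String = parts.into_iter().collect();

    assert_eq!(joined, "hello, world");
    // The result reuses the first string's buffer (its capacity suffices), so
    // the data pointer is unchanged; on older toolchains this may fail.
    assert_eq!(joined.as_ptr(), ptr_before);
}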
#[stable(feature = "herd_cows", since = "1.19.0")]
impl<'a> FromIterator<Cow<'a, str>> for String {
fn from_iter<I: IntoIterator<Item = Cow<'a, str>>>(iter: I) -> String {
let mut buf = String::new();
buf.extend(iter);
buf
let mut iterator = iter.into_iter();
// Because we're iterating over CoWs, we can (potentially) avoid at least
// one allocation by getting the first item and appending to it all the
// subsequent items.
match iterator.next() {
None => String::new(),
Some(cow) => {
let mut buf = cow.into_owned();
buf.extend(iterator);
buf
}
}
}
}
@ -1753,9 +1772,7 @@ impl Extend<char> for String {
let iterator = iter.into_iter();
let (lower_bound, _) = iterator.size_hint();
self.reserve(lower_bound);
for ch in iterator {
self.push(ch)
}
iterator.for_each(move |c| self.push(c));
}
}
@ -1769,27 +1786,21 @@ impl<'a> Extend<&'a char> for String {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Extend<&'a str> for String {
fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
for s in iter {
self.push_str(s)
}
iter.into_iter().for_each(move |s| self.push_str(s));
}
}
#[stable(feature = "extend_string", since = "1.4.0")]
impl Extend<String> for String {
fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
for s in iter {
self.push_str(&s)
}
iter.into_iter().for_each(move |s| self.push_str(&s));
}
}
#[stable(feature = "herd_cows", since = "1.19.0")]
impl<'a> Extend<Cow<'a, str>> for String {
fn extend<I: IntoIterator<Item = Cow<'a, str>>>(&mut self, iter: I) {
for s in iter {
self.push_str(&s)
}
iter.into_iter().for_each(move |s| self.push_str(&s));
}
}
@ -2158,7 +2169,7 @@ impl<T: fmt::Display + ?Sized> ToString for T {
use core::fmt::Write;
let mut buf = String::new();
buf.write_fmt(format_args!("{}", self))
.expect("a Display implementation return an error unexpectedly");
.expect("a Display implementation returned an error unexpectedly");
buf.shrink_to_fit();
buf
}

View file

@ -24,7 +24,7 @@ use core::fmt;
use core::cmp::Ordering;
use core::intrinsics::abort;
use core::mem::{self, align_of_val, size_of_val};
use core::ops::Deref;
use core::ops::{Deref, Receiver};
use core::ops::{CoerceUnsized, DispatchFromDyn};
use core::pin::Pin;
use core::ptr::{self, NonNull};
@ -767,6 +767,9 @@ impl<T: ?Sized> Deref for Arc<T> {
}
}
#[unstable(feature = "receiver_trait", issue = "0")]
impl<T: ?Sized> Receiver for Arc<T> {}
impl<T: Clone> Arc<T> {
/// Makes a mutable reference into the given `Arc`.
///
@ -952,6 +955,8 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
/// drop(foo); // Doesn't print anything
/// drop(foo2); // Prints "dropped!"
/// ```
///
/// [`Weak`]: ../../std/sync/struct.Weak.html
#[inline]
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
@ -1121,7 +1126,7 @@ impl<T: ?Sized> Weak<T> {
}
/// Return `None` when the pointer is dangling and there is no allocated `ArcInner`,
/// i.e. this `Weak` was created by `Weak::new`
/// i.e., this `Weak` was created by `Weak::new`
#[inline]
fn inner(&self) -> Option<&ArcInner<T>> {
if is_dangling(self.ptr) {
@ -1130,6 +1135,53 @@ impl<T: ?Sized> Weak<T> {
Some(unsafe { self.ptr.as_ref() })
}
}
/// Returns true if the two `Weak`s point to the same value (not just values
/// that compare as equal).
///
/// # Notes
///
/// Since this compares pointers it means that two `Weak`s created by
/// `Weak::new()` will compare equal to each other, even though they don't
/// point to any value.
///
/// # Examples
///
/// ```
/// #![feature(weak_ptr_eq)]
/// use std::sync::{Arc, Weak};
///
/// let first_rc = Arc::new(5);
/// let first = Arc::downgrade(&first_rc);
/// let second = Arc::downgrade(&first_rc);
///
/// assert!(Weak::ptr_eq(&first, &second));
///
/// let third_rc = Arc::new(5);
/// let third = Arc::downgrade(&third_rc);
///
/// assert!(!Weak::ptr_eq(&first, &third));
/// ```
///
/// Comparing `Weak::new`.
///
/// ```
/// #![feature(weak_ptr_eq)]
/// use std::sync::{Arc, Weak};
///
/// let first = Weak::new();
/// let second = Weak::new();
/// assert!(Weak::ptr_eq(&first, &second));
///
/// let third_rc = Arc::new(());
/// let third = Arc::downgrade(&third_rc);
/// assert!(!Weak::ptr_eq(&first, &third));
/// ```
#[inline]
#[unstable(feature = "weak_ptr_eq", issue = "55981")]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr.as_ptr() == other.ptr.as_ptr()
}
}
#[stable(feature = "arc_weak", since = "1.4.0")]
@ -1172,10 +1224,11 @@ impl<T: ?Sized> Clone for Weak<T> {
#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
/// Constructs a new `Weak<T>`, without allocating memory.
/// Calling [`upgrade`][Weak::upgrade] on the return value always
/// Calling [`upgrade`] on the return value always
/// gives [`None`].
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`upgrade`]: ../../std/sync/struct.Weak.html#method.upgrade
///
/// # Examples
///
@ -1240,12 +1293,46 @@ impl<T: ?Sized> Drop for Weak<T> {
}
}
#[stable(feature = "rust1", since = "1.0.0")]
trait ArcEqIdent<T: ?Sized + PartialEq> {
fn eq(&self, other: &Arc<T>) -> bool;
fn ne(&self, other: &Arc<T>) -> bool;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
#[inline]
default fn eq(&self, other: &Arc<T>) -> bool {
**self == **other
}
#[inline]
default fn ne(&self, other: &Arc<T>) -> bool {
**self != **other
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
#[inline]
fn eq(&self, other: &Arc<T>) -> bool {
Arc::ptr_eq(self, other) || **self == **other
}
#[inline]
fn ne(&self, other: &Arc<T>) -> bool {
!Arc::ptr_eq(self, other) && **self != **other
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
/// Equality for two `Arc`s.
///
/// Two `Arc`s are equal if their inner values are equal.
///
/// If `T` also implements `Eq`, two `Arc`s that point to the same value are
/// always equal.
///
/// # Examples
///
/// ```
@ -1255,14 +1342,18 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
///
/// assert!(five == Arc::new(5));
/// ```
#[inline]
fn eq(&self, other: &Arc<T>) -> bool {
*(*self) == *(*other)
ArcEqIdent::eq(self, other)
}
/// Inequality for two `Arc`s.
///
/// Two `Arc`s are unequal if their inner values are unequal.
///
/// If `T` also implements `Eq`, two `Arc`s that point to the same value are
/// never unequal.
///
/// # Examples
///
/// ```
@ -1272,10 +1363,12 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
///
/// assert!(five != Arc::new(6));
/// ```
#[inline]
fn ne(&self, other: &Arc<T>) -> bool {
*(*self) != *(*other)
ArcEqIdent::ne(self, other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
/// Partial comparison for two `Arc`s.

View file

@ -10,6 +10,8 @@
use std::any::Any;
use std::sync::{Arc, Weak};
use std::cell::RefCell;
use std::cmp::PartialEq;
#[test]
fn uninhabited() {
@ -53,3 +55,43 @@ fn trait_object() {
b = b.clone();
assert!(b.upgrade().is_none());
}
#[test]
fn float_nan_ne() {
let x = Arc::new(std::f32::NAN);
assert!(x != x);
assert!(!(x == x));
}
#[test]
fn partial_eq() {
struct TestPEq (RefCell<usize>);
impl PartialEq for TestPEq {
fn eq(&self, other: &TestPEq) -> bool {
*self.0.borrow_mut() += 1;
*other.0.borrow_mut() += 1;
true
}
}
let x = Arc::new(TestPEq(RefCell::new(0)));
assert!(x == x);
assert!(!(x != x));
assert_eq!(*x.0.borrow(), 4);
}
#[test]
fn eq() {
#[derive(Eq)]
struct TestEq (RefCell<usize>);
impl PartialEq for TestEq {
fn eq(&self, other: &TestEq) -> bool {
*self.0.borrow_mut() += 1;
*other.0.borrow_mut() += 1;
true
}
}
let x = Arc::new(TestEq(RefCell::new(0)));
assert!(x == x);
assert!(!(x != x));
assert_eq!(*x.0.borrow(), 0);
}

View file

@ -14,7 +14,7 @@ use std::collections::binary_heap::{Drain, PeekMut};
use std::panic::{self, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use rand::{thread_rng, Rng};
use rand::{thread_rng, seq::SliceRandom};
#[test]
fn test_iterator() {
@ -318,11 +318,11 @@ fn panic_safe() {
const NTEST: usize = 10;
// don't use 0 in the data -- we want to catch the zeroed-out case.
let data = (1..DATASZ + 1).collect::<Vec<_>>();
let data = (1..=DATASZ).collect::<Vec<_>>();
// since it's a fuzzy test, run several tries.
for _ in 0..NTEST {
for i in 1..DATASZ + 1 {
for i in 1..=DATASZ {
DROP_COUNTER.store(0, Ordering::SeqCst);
let mut panic_ords: Vec<_> = data.iter()
@ -332,7 +332,7 @@ fn panic_safe() {
let panic_item = PanicOrd(i, true);
// heapify the sane items
rng.shuffle(&mut panic_ords);
panic_ords.shuffle(&mut rng);
let mut heap = BinaryHeap::from(panic_ords);
let inner_data;

View file

@ -302,7 +302,7 @@ fn test_range() {
for i in 0..size {
for j in i..size {
let mut kvs = map.range((Included(&i), Included(&j))).map(|(&k, &v)| (k, v));
let mut pairs = (i..j + 1).map(|i| (i, i));
let mut pairs = (i..=j).map(|i| (i, i));
for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
assert_eq!(kv, pair);
@ -321,7 +321,7 @@ fn test_range_mut() {
for i in 0..size {
for j in i..size {
let mut kvs = map.range_mut((Included(&i), Included(&j))).map(|(&k, &mut v)| (k, v));
let mut pairs = (i..j + 1).map(|i| (i, i));
let mut pairs = (i..=j).map(|i| (i, i));
for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
assert_eq!(kv, pair);

View file

@ -13,11 +13,12 @@
#![feature(drain_filter)]
#![feature(exact_size_is_empty)]
#![feature(pattern)]
#![feature(repeat_generic_slice)]
#![feature(slice_sort_by_cached_key)]
#![feature(str_escape)]
#![feature(try_reserve)]
#![feature(unboxed_closures)]
#![feature(repeat_generic_slice)]
#![feature(vecdeque_rotate)]
extern crate core;
extern crate rand;

View file

@ -10,6 +10,8 @@
use std::any::Any;
use std::rc::{Rc, Weak};
use std::cell::RefCell;
use std::cmp::PartialEq;
#[test]
fn uninhabited() {
@ -53,3 +55,43 @@ fn trait_object() {
b = b.clone();
assert!(b.upgrade().is_none());
}
#[test]
fn float_nan_ne() {
let x = Rc::new(std::f32::NAN);
assert!(x != x);
assert!(!(x == x));
}
#[test]
fn partial_eq() {
struct TestPEq (RefCell<usize>);
impl PartialEq for TestPEq {
fn eq(&self, other: &TestPEq) -> bool {
*self.0.borrow_mut() += 1;
*other.0.borrow_mut() += 1;
true
}
}
let x = Rc::new(TestPEq(RefCell::new(0)));
assert!(x == x);
assert!(!(x != x));
assert_eq!(*x.0.borrow(), 4);
}
#[test]
fn eq() {
#[derive(Eq)]
struct TestEq (RefCell<usize>);
impl PartialEq for TestEq {
fn eq(&self, other: &TestEq) -> bool {
*self.0.borrow_mut() += 1;
*other.0.borrow_mut() += 1;
true
}
}
let x = Rc::new(TestEq(RefCell::new(0)));
assert!(x == x);
assert!(!(x != x));
assert_eq!(*x.0.borrow(), 0);
}

View file

@ -18,7 +18,7 @@ use std::sync::atomic::Ordering::Relaxed;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize};
use std::thread;
use rand::{Rng, RngCore, thread_rng};
use rand::{Rng, RngCore, thread_rng, seq::SliceRandom};
use rand::distributions::Standard;
fn square(n: usize) -> usize {
@ -459,7 +459,7 @@ fn test_sort() {
for i in 0..v.len() {
v[i] = i as i32;
}
v.sort_by(|_, _| *rng.choose(&[Less, Equal, Greater]).unwrap());
v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
v.sort();
for i in 0..v.len() {
assert_eq!(v[i], i as i32);
@ -484,7 +484,7 @@ fn test_sort_stability() {
// create a vector like [(6, 1), (5, 1), (6, 2), ...],
// where the first item of each tuple is random, but
// the second item represents which occurrence of that
// number this element is, i.e. the second elements
// number this element is, i.e., the second elements
// will occur in sorted order.
let mut orig: Vec<_> = (0..len)
.map(|_| {
@ -502,7 +502,7 @@ fn test_sort_stability() {
// This comparison includes the count (the second item
// of the tuple), so elements with equal first items
// will need to be ordered with increasing
// counts... i.e. exactly asserting that this sort is
// counts... i.e., exactly asserting that this sort is
// stable.
assert!(v.windows(2).all(|w| w[0] <= w[1]));
@ -1579,7 +1579,7 @@ macro_rules! test {
}).join();
// Check that the number of things dropped is exactly
// what we expect (i.e. the contents of `v`).
// what we expect (i.e., the contents of `v`).
for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
let count = c.load(Relaxed);
assert!(count == 1,

View file

@ -1005,7 +1005,7 @@ fn test_escape_debug() {
// Note that there are subtleties with the number of backslashes
// on the left- and right-hand sides. In particular, Unicode code points
// are usually escaped with two backslashes on the right-hand side, as
// they are escaped. However, when the character is unescaped (e.g. for
// they are escaped. However, when the character is unescaped (e.g., for
// printable characters), only a single backslash appears (as the character
// itself appears in the debug string).
assert_eq!("abc".escape_debug(), "abc");
@ -1378,7 +1378,7 @@ fn test_bool_from_str() {
fn check_contains_all_substrings(s: &str) {
assert!(s.contains(""));
for i in 0..s.len() {
for j in i+1..s.len() + 1 {
for j in i+1..=s.len() {
assert!(s.contains(&s[i..j]));
}
}
@ -1514,9 +1514,9 @@ fn contains_weird_cases() {
#[test]
fn trim_ws() {
assert_eq!(" \t a \t ".trim_left_matches(|c: char| c.is_whitespace()),
assert_eq!(" \t a \t ".trim_start_matches(|c: char| c.is_whitespace()),
"a \t ");
assert_eq!(" \t a \t ".trim_right_matches(|c: char| c.is_whitespace()),
assert_eq!(" \t a \t ".trim_end_matches(|c: char| c.is_whitespace()),
" \t a");
assert_eq!(" \t a \t ".trim_start_matches(|c: char| c.is_whitespace()),
"a \t ");
@ -1524,9 +1524,9 @@ fn trim_ws() {
" \t a");
assert_eq!(" \t a \t ".trim_matches(|c: char| c.is_whitespace()),
"a");
assert_eq!(" \t \t ".trim_left_matches(|c: char| c.is_whitespace()),
assert_eq!(" \t \t ".trim_start_matches(|c: char| c.is_whitespace()),
"");
assert_eq!(" \t \t ".trim_right_matches(|c: char| c.is_whitespace()),
assert_eq!(" \t \t ".trim_end_matches(|c: char| c.is_whitespace()),
"");
assert_eq!(" \t \t ".trim_start_matches(|c: char| c.is_whitespace()),
"");

View file

@ -79,6 +79,11 @@ fn test_reserve() {
assert!(v.capacity() >= 33)
}
#[test]
fn test_zst_capacity() {
assert_eq!(Vec::<()>::new().capacity(), usize::max_value());
}
#[test]
fn test_extend() {
let mut v = Vec::new();

View file

@ -861,7 +861,7 @@ fn test_as_slices() {
ring.push_back(i);
let (left, right) = ring.as_slices();
let expected: Vec<_> = (0..i + 1).collect();
let expected: Vec<_> = (0..=i).collect();
assert_eq!(left, &expected[..]);
assert_eq!(right, []);
}
@ -869,7 +869,7 @@ fn test_as_slices() {
for j in -last..0 {
ring.push_front(j);
let (left, right) = ring.as_slices();
let expected_left: Vec<_> = (-last..j + 1).rev().collect();
let expected_left: Vec<_> = (-last..=j).rev().collect();
let expected_right: Vec<_> = (0..first).collect();
assert_eq!(left, &expected_left[..]);
assert_eq!(right, &expected_right[..]);
@ -889,7 +889,7 @@ fn test_as_mut_slices() {
ring.push_back(i);
let (left, right) = ring.as_mut_slices();
let expected: Vec<_> = (0..i + 1).collect();
let expected: Vec<_> = (0..=i).collect();
assert_eq!(left, &expected[..]);
assert_eq!(right, []);
}
@ -897,7 +897,7 @@ fn test_as_mut_slices() {
for j in -last..0 {
ring.push_front(j);
let (left, right) = ring.as_mut_slices();
let expected_left: Vec<_> = (-last..j + 1).rev().collect();
let expected_left: Vec<_> = (-last..=j).rev().collect();
let expected_right: Vec<_> = (0..first).collect();
assert_eq!(left, &expected_left[..]);
assert_eq!(right, &expected_right[..]);
@ -1309,3 +1309,137 @@ fn test_try_reserve_exact() {
}
}
#[test]
fn test_rotate_nop() {
let mut v: VecDeque<_> = (0..10).collect();
assert_unchanged(&v);
v.rotate_left(0);
assert_unchanged(&v);
v.rotate_left(10);
assert_unchanged(&v);
v.rotate_right(0);
assert_unchanged(&v);
v.rotate_right(10);
assert_unchanged(&v);
v.rotate_left(3);
v.rotate_right(3);
assert_unchanged(&v);
v.rotate_right(3);
v.rotate_left(3);
assert_unchanged(&v);
v.rotate_left(6);
v.rotate_right(6);
assert_unchanged(&v);
v.rotate_right(6);
v.rotate_left(6);
assert_unchanged(&v);
v.rotate_left(3);
v.rotate_left(7);
assert_unchanged(&v);
v.rotate_right(4);
v.rotate_right(6);
assert_unchanged(&v);
v.rotate_left(1);
v.rotate_left(2);
v.rotate_left(3);
v.rotate_left(4);
assert_unchanged(&v);
v.rotate_right(1);
v.rotate_right(2);
v.rotate_right(3);
v.rotate_right(4);
assert_unchanged(&v);
fn assert_unchanged(v: &VecDeque<i32>) {
assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
}
}
#[test]
fn test_rotate_left_parts() {
let mut v: VecDeque<_> = (1..=7).collect();
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[3, 4, 5, 6, 7, 1][..], &[2][..]));
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[5, 6, 7, 1][..], &[2, 3, 4][..]));
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[7, 1][..], &[2, 3, 4, 5, 6][..]));
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[2, 3, 4, 5, 6, 7, 1][..], &[][..]));
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[4, 5, 6, 7, 1, 2][..], &[3][..]));
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[6, 7, 1, 2][..], &[3, 4, 5][..]));
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[1, 2][..], &[3, 4, 5, 6, 7][..]));
}
#[test]
fn test_rotate_right_parts() {
let mut v: VecDeque<_> = (1..=7).collect();
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[6, 7][..], &[1, 2, 3, 4, 5][..]));
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[4, 5, 6, 7][..], &[1, 2, 3][..]));
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[2, 3, 4, 5, 6, 7][..], &[1][..]));
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[7, 1, 2, 3, 4, 5, 6][..], &[][..]));
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[5, 6][..], &[7, 1, 2, 3, 4][..]));
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[3, 4, 5, 6][..], &[7, 1, 2][..]));
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[1, 2, 3, 4, 5, 6][..], &[7][..]));
}
#[test]
fn test_rotate_left_random() {
let shifts = [
6, 1, 0, 11, 12, 1, 11, 7, 9, 3, 6, 1,
4, 0, 5, 1, 3, 1, 12, 8, 3, 1, 11, 11,
9, 4, 12, 3, 12, 9, 11, 1, 7, 9, 7, 2,
];
let n = 12;
let mut v: VecDeque<_> = (0..n).collect();
let mut total_shift = 0;
for shift in shifts.iter().cloned() {
v.rotate_left(shift);
total_shift += shift;
for i in 0..n {
assert_eq!(v[i], (i + total_shift) % n);
}
}
}
#[test]
fn test_rotate_right_random() {
let shifts = [
6, 1, 0, 11, 12, 1, 11, 7, 9, 3, 6, 1,
4, 0, 5, 1, 3, 1, 12, 8, 3, 1, 11, 11,
9, 4, 12, 3, 12, 9, 11, 1, 7, 9, 7, 2,
];
let n = 12;
let mut v: VecDeque<_> = (0..n).collect();
let mut total_shift = 0;
for shift in shifts.iter().cloned() {
v.rotate_right(shift);
total_shift += shift;
for i in 0..n {
assert_eq!(v[(i + total_shift) % n], i);
}
}
}

View file

@ -213,7 +213,7 @@ use raw_vec::RawVec;
/// about its design. This ensures that it's as low-overhead as possible in
/// the general case, and can be correctly manipulated in primitive ways
/// by unsafe code. Note that these guarantees refer to an unqualified `Vec<T>`.
/// If additional type parameters are added (e.g. to support custom allocators),
/// If additional type parameters are added (e.g., to support custom allocators),
/// overriding their defaults may change the behavior.
///
/// Most fundamentally, `Vec` is and always will be a (pointer, capacity, length)
@ -1241,8 +1241,6 @@ impl<T> Vec<T> {
/// # Examples
///
/// ```
/// #![feature(vec_resize_with)]
///
/// let mut vec = vec![1, 2, 3];
/// vec.resize_with(5, Default::default);
/// assert_eq!(vec, [1, 2, 3, 0, 0]);
@ -1255,7 +1253,7 @@ impl<T> Vec<T> {
///
/// [`resize`]: #method.resize
/// [`Clone`]: ../../std/clone/trait.Clone.html
#[unstable(feature = "vec_resize_with", issue = "41758")]
#[stable(feature = "vec_resize_with", since = "1.33.0")]
pub fn resize_with<F>(&mut self, new_len: usize, f: F)
where F: FnMut() -> T
{

@ -1 +0,0 @@
Subproject commit fe74674f6e4be76d47b66f67d529ebf4186f4eb1

View file

@ -20,7 +20,7 @@ name = "corebenches"
path = "../libcore/benches/lib.rs"
[dev-dependencies]
rand = "0.5"
rand = "0.6"
[features]
# Make panics and failed asserts immediately abort without formatting any message

View file

@ -69,7 +69,7 @@ impl Layout {
/// * `align` must be a power of two,
///
/// * `size`, when rounded up to the nearest multiple of `align`,
/// must not overflow (i.e. the rounded value must be less than
/// must not overflow (i.e., the rounded value must be less than
/// `usize::MAX`).
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[inline]
@ -177,7 +177,7 @@ impl Layout {
/// to ensure that the following address will satisfy `align`
/// (measured in bytes).
///
/// E.g. if `self.size()` is 9, then `self.padding_needed_for(4)`
/// e.g., if `self.size()` is 9, then `self.padding_needed_for(4)`
/// returns 3, because that is the minimum number of bytes of
/// padding required to get a 4-aligned address (assuming that the
/// corresponding memory block starts at a 4-aligned address).
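A quick standalone check of the worked example in this doc comment, using a hypothetical helper that mirrors the documented rounding behaviour (round `size` up to a power-of-two `align`, then subtract `size`):

// Hypothetical helper; `align` is assumed to be a power of two.
fn padding_needed_for(size: usize, align: usize) -> usize {
    let rounded = (size + align - 1) & !(align - 1);
    rounded - size
}

fn main() {
    // The doc example above: a 9-byte layout needs 3 bytes of padding to
    // reach the next 4-aligned address.
    assert_eq!(padding_needed_for(9, 4), 3);
    assert_eq!(padding_needed_for(12, 4), 0);
    assert_eq!(padding_needed_for(9, 8), 7);
}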
@ -455,7 +455,7 @@ pub unsafe trait GlobalAlloc {
/// if the caller does not ensure that `layout` has non-zero size.
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g. guarantee a sentinel address or a null pointer
/// behavior, e.g., guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// The allocated block of memory may or may not be initialized.
@ -550,10 +550,10 @@ pub unsafe trait GlobalAlloc {
/// * `new_size` must be greater than zero.
///
/// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
/// must not overflow (i.e. the rounded value must be less than `usize::MAX`).
/// must not overflow (i.e., the rounded value must be less than `usize::MAX`).
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g. guarantee a sentinel address or a null pointer
/// behavior, e.g., guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// # Errors
@ -616,7 +616,7 @@ pub unsafe trait GlobalAlloc {
/// whether to return `Err`, or to return `Ok` with some pointer.
///
/// * If an `Alloc` implementation chooses to return `Ok` in this
/// case (i.e. the pointer denotes a zero-sized inaccessible block)
/// case (i.e., the pointer denotes a zero-sized inaccessible block)
/// then that returned pointer must be considered "currently
/// allocated". On such an allocator, *all* methods that take
/// currently-allocated pointers as inputs must accept these
@ -651,7 +651,7 @@ pub unsafe trait GlobalAlloc {
///
/// * if a layout `k` fits a memory block (denoted by `ptr`)
/// currently allocated via an allocator `a`, then it is legal to
/// use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`.
/// use that layout to deallocate it, i.e., `a.dealloc(ptr, k);`.
///
/// # Unsafety
///
@ -673,7 +673,7 @@ pub unsafe trait Alloc {
// (Note: some existing allocators have unspecified but well-defined
// behavior in response to a zero size allocation request ;
// e.g. in C, `malloc` of 0 will either return a null pointer or a
// e.g., in C, `malloc` of 0 will either return a null pointer or a
// unique pointer, but will not have arbitrary undefined
// behavior.
// However in jemalloc for example,
@ -688,7 +688,7 @@ pub unsafe trait Alloc {
///
/// The returned block of storage may or may not have its contents
/// initialized. (Extension subtraits might restrict this
/// behavior, e.g. to ensure initialization to particular sets of
/// behavior, e.g., to ensure initialization to particular sets of
/// bit patterns.)
///
/// # Safety
@ -697,7 +697,7 @@ pub unsafe trait Alloc {
/// if the caller does not ensure that `layout` has non-zero size.
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g. guarantee a sentinel address or a null pointer
/// behavior, e.g., guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// # Errors
@ -803,10 +803,10 @@ pub unsafe trait Alloc {
/// * `new_size` must be greater than zero.
///
/// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
/// must not overflow (i.e. the rounded value must be less than `usize::MAX`).
/// must not overflow (i.e., the rounded value must be less than `usize::MAX`).
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g. guarantee a sentinel address or a null pointer
/// behavior, e.g., guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// # Errors

View file

@ -126,7 +126,7 @@ impl fmt::Debug for dyn Any {
}
}
// Ensure that the result of e.g. joining a thread can be printed and
// Ensure that the result of e.g., joining a thread can be printed and
// hence used with `unwrap`. May eventually no longer be needed if
// dispatch works with upcasting.
#[stable(feature = "rust1", since = "1.0.0")]

View file

@ -282,6 +282,12 @@ bench_sums! {
(0i64..1000000).chain(1000000..).take_while(|&x| x < 1111111)
}
bench_sums! {
bench_cycle_take_sum,
bench_cycle_take_ref_sum,
(0i64..10000).cycle().take(1000000)
}
// Checks whether Skip<Zip<A,B>> is as fast as Zip<Skip<A>, Skip<B>>, from
// https://users.rust-lang.org/t/performance-difference-between-iterator-zip-and-skip-order/15743
#[bench]

View file

@ -97,7 +97,7 @@
//! ## Implementation details of logically-immutable methods
//!
//! Occasionally it may be desirable not to expose in an API that there is mutation happening
//! "under the hood". This may be because logically the operation is immutable, but e.g. caching
//! "under the hood". This may be because logically the operation is immutable, but e.g., caching
//! forces the implementation to perform mutation; or because you must employ mutation to implement
//! a trait method that was originally defined to take `&self`.
//!
@ -1227,7 +1227,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for Ref<'_, T> {
}
impl<'b, T: ?Sized> RefMut<'b, T> {
/// Make a new `RefMut` for a component of the borrowed data, e.g. an enum
/// Make a new `RefMut` for a component of the borrowed data, e.g., an enum
/// variant.
///
/// The `RefCell` is already mutably borrowed, so this cannot fail.
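A small usage sketch (not part of the patch) of the method this doc comment describes, narrowing a mutable borrow down to a single field:

use std::cell::{RefCell, RefMut};

struct Point { x: i32, y: i32 }

fn main() {
    let cell = RefCell::new(Point { x: 1, y: 2 });
    {
        // Narrow the mutable borrow of the whole `Point` down to `x`.
        let mut x_only: RefMut<i32> = RefMut::map(cell.borrow_mut(), |p| &mut p.x);
        *x_only += 10;
    }
    assert_eq!(cell.borrow().x, 11);
    assert_eq!(cell.borrow().y, 2);
}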

View file

@ -131,7 +131,7 @@ pub struct EscapeUnicode {
state: EscapeUnicodeState,
// The index of the next hex digit to be printed (0 if none),
// i.e. the number of remaining hex digits to be printed;
// i.e., the number of remaining hex digits to be printed;
// increasing from the least significant digit: 0x543210
hex_digit_idx: usize,
}

View file

@ -13,7 +13,7 @@
//! In Rust, some simple types are "implicitly copyable" and when you
//! assign them or pass them as arguments, the receiver will get a copy,
//! leaving the original value in place. These types do not require
//! allocation to copy and do not have finalizers (i.e. they do not
//! allocation to copy and do not have finalizers (i.e., they do not
//! contain owned boxes or implement [`Drop`]), so the compiler considers
//! them cheap and safe to copy. For other types copies must be made
//! explicitly, by convention implementing the [`Clone`] trait and calling
@ -93,10 +93,10 @@
/// In addition to the [implementors listed below][impls],
/// the following types also implement `Clone`:
///
/// * Function item types (i.e. the distinct types defined for each function)
/// * Function pointer types (e.g. `fn() -> i32`)
/// * Array types, for all sizes, if the item type also implements `Clone` (e.g. `[i32; 123456]`)
/// * Tuple types, if each component also implements `Clone` (e.g. `()`, `(i32, bool)`)
/// * Function item types (i.e., the distinct types defined for each function)
/// * Function pointer types (e.g., `fn() -> i32`)
/// * Array types, for all sizes, if the item type also implements `Clone` (e.g., `[i32; 123456]`)
/// * Tuple types, if each component also implements `Clone` (e.g., `()`, `(i32, bool)`)
/// * Closure types, if they capture no value from the environment
/// or if all such captured values implement `Clone` themselves.
/// Note that variables captured by shared reference always implement `Clone`

View file

@ -327,7 +327,8 @@ pub trait Into<T>: Sized {
/// An example usage for error handling:
///
/// ```
/// use std::io::{self, Read};
/// use std::fs;
/// use std::io;
/// use std::num;
///
/// enum CliError {
@ -348,9 +349,7 @@ pub trait Into<T>: Sized {
/// }
///
/// fn open_and_parse_file(file_name: &str) -> Result<i32, CliError> {
/// let mut file = std::fs::File::open("test")?;
/// let mut contents = String::new();
/// file.read_to_string(&mut contents)?;
/// let contents = fs::read_to_string(&file_name)?;
/// let num: i32 = contents.trim().parse()?;
/// Ok(num)
/// }

View file

@ -1,7 +1,6 @@
#![stable(feature = "", since = "1.30.0")]
#![allow(non_camel_case_types)]
#![cfg_attr(stage0, allow(dead_code))]
//! Utilities related to FFI bindings.
@ -18,7 +17,7 @@ use ::fmt;
///
/// [`!`]: ../../std/primitive.never.html
/// [pointer]: ../../std/primitive.pointer.html
// NB: For LLVM to recognize the void pointer type and by extension
// N.B., for LLVM to recognize the void pointer type and by extension
// functions like malloc(), we need to have it represented as i8* in
// LLVM bitcode. The enum used here ensures this and prevents misuse
// of the "raw" type by only having private variants. We need two
@ -45,6 +44,7 @@ impl fmt::Debug for c_void {
/// Basic implementation of a `va_list`.
#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"),
not(target_arch = "x86_64")),
all(target_arch = "aarch64", target_os = "ios"),
windows))]
#[unstable(feature = "c_variadic",
reason = "the `c_variadic` feature has not been properly tested on \
@ -122,7 +122,6 @@ struct VaListImpl {
all supported platforms",
issue = "27745")]
#[repr(transparent)]
#[cfg(not(stage0))]
pub struct VaList<'a>(&'a mut VaListImpl);
// The VaArgSafe trait needs to be used in public interfaces, however, the trait
@ -172,7 +171,6 @@ impl<T> sealed_trait::VaArgSafe for *mut T {}
issue = "27745")]
impl<T> sealed_trait::VaArgSafe for *const T {}
#[cfg(not(stage0))]
impl<'a> VaList<'a> {
/// Advance to the next arg.
#[unstable(feature = "c_variadic",
@ -192,6 +190,7 @@ impl<'a> VaList<'a> {
where F: for<'copy> FnOnce(VaList<'copy>) -> R {
#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"),
not(target_arch = "x86_64")),
                   all(target_arch = "aarch64", target_os = "ios"),
windows))]
let mut ap = va_copy(self);
#[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
@ -206,7 +205,6 @@ impl<'a> VaList<'a> {
}
}
#[cfg(not(stage0))]
extern "rust-intrinsic" {
/// Destroy the arglist `ap` after initialization with `va_start` or
/// `va_copy`.

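For illustration (not part of this commit), the classic use of `c_void` is an opaque user-data pointer passed through a C callback. The `bump` callback below is hypothetical and only sketches the pattern.

use std::ffi::c_void;

// A callback a C library might invoke with an opaque pointer.
unsafe extern "C" fn bump(user_data: *mut c_void) {
    let counter = &mut *(user_data as *mut u32);
    *counter += 1;
}

fn main() {
    let mut counter: u32 = 0;
    // A real library would store the callback and call it later; calling it
    // directly keeps the sketch self-contained.
    unsafe { bump(&mut counter as *mut u32 as *mut c_void) };
    assert_eq!(counter, 1);
}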
View file

@ -609,10 +609,15 @@ pub trait Debug {
/// println!("The origin is: {}", origin);
/// ```
#[rustc_on_unimplemented(
on(
_Self="std::path::Path",
label="`{Self}` cannot be formatted with the default formatter; call `.display()` on it",
note="call `.display()` or `.to_string_lossy()` to safely print paths, \
as they may contain non-Unicode data"
),
message="`{Self}` doesn't implement `{Display}`",
label="`{Self}` cannot be formatted with the default formatter",
note="in format strings you may be able to use `{{:?}}` \
(or {{:#?}} for pretty-print) instead",
note="in format strings you may be able to use `{{:?}}` (or {{:#?}} for pretty-print) instead",
)]
#[doc(alias = "{}")]
#[stable(feature = "rust1", since = "1.0.0")]
@ -1381,7 +1386,7 @@ impl<'a> Formatter<'a> {
for part in formatted.parts {
match *part {
flt2dec::Part::Zero(mut nzeroes) => {
const ZEROES: &'static str = // 64 zeroes
const ZEROES: &str = // 64 zeroes
"0000000000000000000000000000000000000000000000000000000000000000";
while nzeroes > ZEROES.len() {
self.buf.write_str(ZEROES)?;

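For illustration (not part of this commit), the fix suggested by the new `rustc_on_unimplemented` note for `std::path::Path` looks like this in practice:

use std::path::Path;

fn main() {
    let path = Path::new("/tmp/report.txt");
    // `Path` has no `Display` impl because it may contain non-Unicode data;
    // `.display()` provides a printable view.
    println!("{}", path.display());
    // `.to_string_lossy()` is the other suggestion from the diagnostic.
    println!("{}", path.to_string_lossy());
}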
View file

@ -33,6 +33,7 @@ use task::{Poll, LocalWaker};
///
/// When using a future, you generally won't call `poll` directly, but instead
/// `await!` the value.
#[must_use]
pub trait Future {
/// The result of the `Future`.
type Output;

View file

@ -408,7 +408,7 @@ impl<H: Hasher + ?Sized> Hasher for &mut H {
/// A trait for creating instances of [`Hasher`].
///
/// A `BuildHasher` is typically used (e.g. by [`HashMap`]) to create
/// A `BuildHasher` is typically used (e.g., by [`HashMap`]) to create
/// [`Hasher`]s for each key such that they are hashed independently of one
/// another, since [`Hasher`]s contain state.
///

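For illustration (not part of this commit), a minimal sketch of the `BuildHasher`/`Hasher` relationship described above, using `RandomState`, the builder `HashMap` uses by default:

use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

fn main() {
    let state = RandomState::new();
    // Each call hands back a fresh `Hasher` with the same seed, so equal keys
    // hash equally within this map while other maps stay independent.
    let mut hasher = state.build_hasher();
    7920_u32.hash(&mut hasher);
    println!("hash of 7920: {:x}", hasher.finish());
}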
View file

@ -24,7 +24,7 @@ use intrinsics;
/// therefore will eliminate all branches that reach to a call to
/// `unreachable_unchecked()`.
///
/// Like all instances of UB, if this assumption turns out to be wrong, i.e. the
/// Like all instances of UB, if this assumption turns out to be wrong, i.e., the
/// `unreachable_unchecked()` call is actually reachable among all possible
/// control flow, the compiler will apply the wrong optimization strategy, and
/// may sometimes even corrupt seemingly unrelated code, causing

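For illustration (not part of this commit), a minimal sketch of the intended use: a caller-provided guarantee lets the optimizer delete a branch. `div_unchecked` is a made-up name for the example.

use std::hint::unreachable_unchecked;

/// # Safety
/// The caller must guarantee that `divisor` is non-zero.
unsafe fn div_unchecked(dividend: u32, divisor: u32) -> u32 {
    if divisor == 0 {
        // The caller promised this cannot happen; if the promise is broken,
        // this is undefined behavior, exactly as the documentation warns.
        unreachable_unchecked()
    }
    dividend / divisor
}

fn main() {
    assert_eq!(unsafe { div_unchecked(10, 2) }, 5);
}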
View file

@ -52,7 +52,7 @@
pub use ptr::drop_in_place;
extern "rust-intrinsic" {
// NB: These intrinsics take raw pointers because they mutate aliased
// N.B., these intrinsics take raw pointers because they mutate aliased
// memory, which is not valid for either `&` or `&mut`.
/// Stores a value if the current value is the same as the `old` value.
@ -635,7 +635,7 @@ extern "rust-intrinsic" {
/// Tells LLVM that this point in the code is not reachable, enabling
/// further optimizations.
///
/// NB: This is very different from the `unreachable!()` macro: Unlike the
/// N.B., this is very different from the `unreachable!()` macro: Unlike the
/// macro, which panics when it is executed, it is *undefined behavior* to
/// reach code marked with this function.
///
@ -718,7 +718,6 @@ extern "rust-intrinsic" {
pub fn uninit<T>() -> T;
/// Moves a value out of scope without running drop glue.
#[cfg(not(stage0))]
pub fn forget<T: ?Sized>(_: T);
/// Reinterprets the bits of a value of one type as another type.
@ -1476,14 +1475,12 @@ extern "rust-intrinsic" {
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `rotate_left` method. For example,
/// [`std::u32::rotate_left`](../../std/primitive.u32.html#method.rotate_left)
#[cfg(not(stage0))]
pub fn rotate_left<T>(x: T, y: T) -> T;
/// Performs rotate right.
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `rotate_right` method. For example,
/// [`std::u32::rotate_right`](../../std/primitive.u32.html#method.rotate_right)
#[cfg(not(stage0))]
pub fn rotate_right<T>(x: T, y: T) -> T;
/// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.

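For illustration (not part of this commit), the stabilized surface for these intrinsics is the inherent `rotate_left`/`rotate_right` methods on the integer primitives:

fn main() {
    let x: u32 = 0x1234_5678;
    assert_eq!(x.rotate_left(8), 0x3456_7812);  // top byte wraps around to the bottom
    assert_eq!(x.rotate_right(8), 0x7812_3456); // bottom byte wraps around to the top
}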
View file

@ -87,7 +87,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item=()>) {}
on(
_Self="[]",
label="borrow the array with `&` or call `.iter()` on it to iterate over it",
note="arrays are not an iterators, but slices like the following are: `&[1, 2, 3]`"
note="arrays are not iterators, but slices like the following are: `&[1, 2, 3]`"
),
on(
_Self="{integral}",
@ -98,6 +98,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item=()>) {}
message="`{Self}` is not an iterator"
)]
#[doc(spotlight)]
#[must_use]
pub trait Iterator {
/// The type of the elements being iterated over.
#[stable(feature = "rust1", since = "1.0.0")]
@ -154,7 +155,7 @@ pub trait Iterator {
///
/// `size_hint()` is primarily intended to be used for optimizations such as
/// reserving space for the elements of the iterator, but must not be
/// trusted to e.g. omit bounds checks in unsafe code. An incorrect
/// trusted to e.g., omit bounds checks in unsafe code. An incorrect
/// implementation of `size_hint()` should not lead to memory safety
/// violations.
///

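For illustration (not part of this commit), a slice iterator knows its exact length, so its hint is tight, while an adaptor with a predicate can only report a loose upper bound; per the wording above, unsafe code must not rely on either.

fn main() {
    let a = [1, 2, 3];
    let mut iter = a.iter();
    assert_eq!(iter.size_hint(), (3, Some(3)));
    iter.next();
    assert_eq!(iter.size_hint(), (2, Some(2)));
    // `filter` cannot know how many items will pass, so the lower bound is 0.
    assert_eq!(a.iter().filter(|&&x| x > 1).size_hint(), (0, Some(3)));
}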
View file

@ -429,6 +429,9 @@ impl<I> Iterator for Rev<I> where I: DoubleEndedIterator {
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
#[inline]
fn nth(&mut self, n: usize) -> Option<<I as Iterator>::Item> { self.iter.nth_back(n) }
fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R where
Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
{
@ -461,6 +464,9 @@ impl<I> DoubleEndedIterator for Rev<I> where I: DoubleEndedIterator {
#[inline]
fn next_back(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next() }
#[inline]
fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> { self.iter.nth(n) }
fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R where
Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
{
@ -702,7 +708,9 @@ unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned<I>
}
#[inline]
fn may_have_side_effect() -> bool { false }
fn may_have_side_effect() -> bool {
I::may_have_side_effect()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
@ -1953,18 +1961,11 @@ impl<I: Iterator> Iterator for Peekable<I> {
#[inline]
fn nth(&mut self, n: usize) -> Option<I::Item> {
// FIXME(#43234): merge these when borrow-checking gets better.
if n == 0 {
match self.peeked.take() {
Some(v) => v,
None => self.iter.nth(n),
}
} else {
match self.peeked.take() {
Some(None) => None,
Some(Some(_)) => self.iter.nth(n - 1),
None => self.iter.nth(n),
}
match self.peeked.take() {
Some(None) => None,
Some(v @ Some(_)) if n == 0 => v,
Some(Some(_)) => self.iter.nth(n - 1),
None => self.iter.nth(n),
}
}
@ -2063,14 +2064,8 @@ impl<I: Iterator> Peekable<I> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn peek(&mut self) -> Option<&I::Item> {
if self.peeked.is_none() {
self.peeked = Some(self.iter.next());
}
match self.peeked {
Some(Some(ref value)) => Some(value),
Some(None) => None,
_ => unreachable!(),
}
let iter = &mut self.iter;
self.peeked.get_or_insert_with(|| iter.next()).as_ref()
}
}
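For illustration (not part of this commit), the `get_or_insert_with` rewrite of `peek` above preserves the documented behavior: the first `peek` pulls one element from the underlying iterator and caches it, so repeated peeks return the same element without advancing.

fn main() {
    let xs = [1, 2, 3];
    let mut iter = xs.iter().peekable();
    assert_eq!(iter.peek(), Some(&&1)); // pulls and caches the first element
    assert_eq!(iter.peek(), Some(&&1)); // served from the cache
    assert_eq!(iter.next(), Some(&1));  // hands out the cached element
    assert_eq!(iter.next(), Some(&2));
}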
@ -2207,8 +2202,12 @@ impl<I: Iterator, P> Iterator for TakeWhile<I, P>
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (_, upper) = self.iter.size_hint();
(0, upper) // can't know a lower bound, due to the predicate
if self.flag {
(0, Some(0))
} else {
let (_, upper) = self.iter.size_hint();
(0, upper) // can't know a lower bound, due to the predicate
}
}
#[inline]
@ -2419,6 +2418,10 @@ impl<I> Iterator for Take<I> where I: Iterator{
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.n == 0 {
return (0, Some(0));
}
let (lower, upper) = self.iter.size_hint();
let lower = cmp::min(lower, self.n);

View file

@ -427,6 +427,62 @@ pub trait DoubleEndedIterator: Iterator {
#[stable(feature = "rust1", since = "1.0.0")]
fn next_back(&mut self) -> Option<Self::Item>;
/// Returns the `n`th element from the end of the iterator.
///
/// This is essentially the reversed version of [`nth`]. Although like most indexing
/// operations, the count starts from zero, so `nth_back(0)` returns the first value from
/// the end, `nth_back(1)` the second, and so on.
///
/// Note that all elements between the end and the returned element will be
/// consumed, including the returned element. This also means that calling
/// `nth_back(0)` multiple times on the same iterator will return different
/// elements.
///
/// `nth_back()` will return [`None`] if `n` is greater than or equal to the length of the
/// iterator.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`nth`]: ../../std/iter/trait.Iterator.html#method.nth
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(iter_nth_back)]
/// let a = [1, 2, 3];
/// assert_eq!(a.iter().nth_back(2), Some(&1));
/// ```
///
/// Calling `nth_back()` multiple times doesn't rewind the iterator:
///
/// ```
/// #![feature(iter_nth_back)]
/// let a = [1, 2, 3];
///
/// let mut iter = a.iter();
///
/// assert_eq!(iter.nth_back(1), Some(&2));
/// assert_eq!(iter.nth_back(1), None);
/// ```
///
/// Returning `None` if there are fewer than `n + 1` elements:
///
/// ```
/// #![feature(iter_nth_back)]
/// let a = [1, 2, 3];
/// assert_eq!(a.iter().nth_back(10), None);
/// ```
#[inline]
#[unstable(feature = "iter_nth_back", issue = "56995")]
fn nth_back(&mut self, mut n: usize) -> Option<Self::Item> {
for x in self.rev() {
if n == 0 { return Some(x) }
n -= 1;
}
None
}
/// This is the reverse version of [`try_fold()`]: it takes elements
/// starting from the back of the iterator.
///
@ -461,8 +517,11 @@ pub trait DoubleEndedIterator: Iterator {
/// ```
#[inline]
#[stable(feature = "iterator_try_fold", since = "1.27.0")]
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R where
Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Ok=B>
{
let mut accum = init;
while let Some(x) = self.next_back() {
@ -524,8 +583,10 @@ pub trait DoubleEndedIterator: Iterator {
/// ```
#[inline]
#[stable(feature = "iter_rfold", since = "1.27.0")]
fn rfold<B, F>(mut self, accum: B, mut f: F) -> B where
Self: Sized, F: FnMut(B, Self::Item) -> B,
fn rfold<B, F>(mut self, accum: B, mut f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
self.try_rfold(accum, move |acc, x| Ok::<B, !>(f(acc, x))).unwrap()
}
@ -574,7 +635,8 @@ pub trait DoubleEndedIterator: Iterator {
/// ```
#[inline]
#[stable(feature = "iter_rfind", since = "1.27.0")]
fn rfind<P>(&mut self, mut predicate: P) -> Option<Self::Item> where
fn rfind<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool
{
@ -587,7 +649,12 @@ pub trait DoubleEndedIterator: Iterator {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I {
fn next_back(&mut self) -> Option<I::Item> { (**self).next_back() }
fn next_back(&mut self) -> Option<I::Item> {
(**self).next_back()
}
fn nth_back(&mut self, n: usize) -> Option<I::Item> {
(**self).nth_back(n)
}
}
/// An iterator that knows its exact length.
@ -770,7 +837,7 @@ pub trait Product<A = Self>: Sized {
fn product<I: Iterator<Item=A>>(iter: I) -> Self;
}
// NB: explicitly use Add and Mul here to inherit overflow checks
// N.B., explicitly use Add and Mul here to inherit overflow checks
macro_rules! integer_sum_product {
(@impls $zero:expr, $one:expr, #[$attr:meta], $($a:ty)*) => ($(
#[$attr]

View file

@ -71,6 +71,7 @@
#![no_core]
#![deny(missing_docs)]
#![deny(intra_doc_link_resolution_failure)]
#![deny(missing_debug_implementations)]
#![feature(allow_internal_unstable)]
@ -92,6 +93,7 @@
#![feature(link_llvm_intrinsics)]
#![feature(never_type)]
#![feature(nll)]
#![feature(bind_by_move_pattern_guards)]
#![feature(exhaustive_patterns)]
#![feature(no_core)]
#![feature(on_unimplemented)]
@ -120,7 +122,6 @@
#![feature(const_slice_len)]
#![feature(const_str_as_bytes)]
#![feature(const_str_len)]
#![feature(const_let)]
#![feature(const_int_rotate)]
#![feature(const_int_wrapping)]
#![feature(const_int_sign)]
@ -250,9 +251,7 @@ macro_rules! vector_impl { ($([$f:ident, $($args:tt)*]),*) => { $($f!($($args)*)
#[path = "../stdsimd/coresimd/mod.rs"]
#[allow(missing_docs, missing_debug_implementations, dead_code, unused_imports)]
#[unstable(feature = "stdsimd", issue = "48556")]
#[cfg(not(stage0))] // allow changes to how stdsimd works in stage0
mod coresimd;
#[stable(feature = "simd_arch", since = "1.27.0")]
#[cfg(not(stage0))]
pub use coresimd::arch;

View file

@ -238,6 +238,10 @@ macro_rules! debug_assert_ne {
/// with converting downstream errors.
///
/// The `?` operator was added to replace `try!` and should be used instead.
/// Furthermore, `try` is a reserved word in Rust 2018, so if you must use
/// it, you will need to use the [raw-identifier syntax][ris]: `r#try`.
///
/// [ris]: https://doc.rust-lang.org/nightly/rust-by-example/compatibility/raw_identifiers.html
///
/// `try!` matches the given [`Result`]. In case of the `Ok` variant, the
/// expression has the value of the wrapped value.

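For illustration (not part of this commit), the raw-identifier spelling mentioned above looks like this in a Rust 2018 crate; `read_config` is a made-up function for the sketch, and `?` remains the preferred replacement.

use std::fs::File;
use std::io;

fn read_config(path: &str) -> Result<File, io::Error> {
    // `try!(...)` no longer parses in Rust 2018, but `r#try!(...)` still does.
    let file = r#try!(File::open(path));
    Ok(file)
}

fn main() {
    let _ = read_config("Config.toml");
}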
View file

@ -274,10 +274,10 @@ pub trait Unsize<T: ?Sized> {
/// In addition to the [implementors listed below][impls],
/// the following types also implement `Copy`:
///
/// * Function item types (i.e. the distinct types defined for each function)
/// * Function pointer types (e.g. `fn() -> i32`)
/// * Array types, for all sizes, if the item type also implements `Copy` (e.g. `[i32; 123456]`)
/// * Tuple types, if each component also implements `Copy` (e.g. `()`, `(i32, bool)`)
/// * Function item types (i.e., the distinct types defined for each function)
/// * Function pointer types (e.g., `fn() -> i32`)
/// * Array types, for all sizes, if the item type also implements `Copy` (e.g., `[i32; 123456]`)
/// * Tuple types, if each component also implements `Copy` (e.g., `()`, `(i32, bool)`)
/// * Closure types, if they capture no value from the environment
/// or if all such captured values implement `Copy` themselves.
/// Note that variables captured by shared reference always implement `Copy`
@ -596,7 +596,7 @@ mod impls {
/// This affects, for example, whether a `static` of that type is
/// placed in read-only static memory or writable static memory.
#[lang = "freeze"]
unsafe auto trait Freeze {}
pub(crate) unsafe auto trait Freeze {}
impl<T: ?Sized> !Freeze for UnsafeCell<T> {}
unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
@ -640,15 +640,15 @@ unsafe impl<T: ?Sized> Freeze for &mut T {}
#[unstable(feature = "pin", issue = "49150")]
pub auto trait Unpin {}
/// A type which does not implement `Unpin`.
/// A marker type which does not implement `Unpin`.
///
/// If a type contains a `Pinned`, it will not implement `Unpin` by default.
/// If a type contains a `PhantomPinned`, it will not implement `Unpin` by default.
#[unstable(feature = "pin", issue = "49150")]
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Pinned;
pub struct PhantomPinned;
#[unstable(feature = "pin", issue = "49150")]
impl !Unpin for Pinned {}
impl !Unpin for PhantomPinned {}
#[unstable(feature = "pin", issue = "49150")]
impl<'a, T: ?Sized + 'a> Unpin for &'a T {}

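For illustration (not part of this commit), the renamed marker later stabilized as `std::marker::PhantomPinned`; embedding it opts a type out of `Unpin`. The `assert_unpin` helper is made up for the example.

use std::marker::PhantomPinned;

struct SelfReferential {
    data: u8,
    _pin: PhantomPinned, // opts the containing type out of `Unpin`
}

fn assert_unpin<T: Unpin>() {}

fn main() {
    assert_unpin::<u8>(); // most types are `Unpin`
    // assert_unpin::<SelfReferential>(); // would not compile: the type is `!Unpin`
    let sr = SelfReferential { data: 0, _pin: PhantomPinned };
    assert_eq!(sr.data, 0);
}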
View file

@ -149,7 +149,6 @@ pub fn forget<T>(t: T) {
///
/// [`forget`]: fn.forget.html
#[inline]
#[cfg(not(stage0))]
#[unstable(feature = "forget_unsized", issue = "0")]
pub fn forget_unsized<T: ?Sized>(t: T) {
unsafe { intrinsics::forget(t) }
@ -305,7 +304,7 @@ pub const fn size_of<T>() -> usize {
/// Returns the size of the pointed-to value in bytes.
///
/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
/// statically known size, e.g. a slice [`[T]`][slice] or a [trait object],
/// statically known size, e.g., a slice [`[T]`][slice] or a [trait object],
/// then `size_of_val` can be used to get the dynamically-known size.
///
/// [slice]: ../../std/primitive.slice.html
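For illustration (not part of this commit), the statically-sized versus dynamically-sized distinction described above:

use std::mem;

fn main() {
    assert_eq!(mem::size_of_val(&1u32), 4);  // same as mem::size_of::<u32>()
    let s: &str = "hello";
    assert_eq!(mem::size_of_val(s), 5);      // byte length of the unsized `str`
    let slice: &[u64] = &[1, 2, 3];
    assert_eq!(mem::size_of_val(slice), 24); // 3 elements of 8 bytes each
}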
@ -531,6 +530,12 @@ pub unsafe fn zeroed<T>() -> T {
/// it goes out of scope (and therefore would be dropped). Note that this
/// includes a `panic` occurring and unwinding the stack suddenly.
///
/// If you partially initialize an array, you may need to use
/// [`ptr::drop_in_place`][drop_in_place] to drop the elements you have fully
/// initialized, followed by [`mem::forget`][mem_forget] to prevent drop from running
/// on the array. If a partially initialized array is dropped, this will lead to
/// undefined behavior.
///
/// # Examples
///
/// Here's how to safely initialize an array of [`Vec`]s.
@ -584,11 +589,44 @@ pub unsafe fn zeroed<T>() -> T {
/// println!("{:?}", &data[0]);
/// ```
///
/// This example shows how to handle partially initialized arrays, which can
/// come up in low-level data structures.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Count the number of elements we have assigned.
/// let mut data_len: usize = 0;
/// let mut data: [String; 1000];
///
/// unsafe {
/// data = mem::uninitialized();
///
/// for elem in &mut data[0..500] {
/// ptr::write(elem, String::from("hello"));
/// data_len += 1;
/// }
///
/// // For each item in the array, drop if we allocated it.
/// for i in &mut data[0..data_len] {
/// ptr::drop_in_place(i);
/// }
/// }
/// // Forget the data. If this is allowed to drop, you may see a crash such as:
/// // 'mem_uninit_test(2457,0x7fffb55dd380) malloc: *** error for object
/// // 0x7ff3b8402920: pointer being freed was not allocated'
/// mem::forget(data);
/// ```
///
/// [`Vec`]: ../../std/vec/struct.Vec.html
/// [`vec!`]: ../../std/macro.vec.html
/// [`Clone`]: ../../std/clone/trait.Clone.html
/// [ub]: ../../reference/behavior-considered-undefined.html
/// [write]: ../ptr/fn.write.html
/// [drop_in_place]: ../ptr/fn.drop_in_place.html
/// [mem_zeroed]: fn.zeroed.html
/// [mem_forget]: fn.forget.html
/// [copy]: ../intrinsics/fn.copy.html
/// [copy_no]: ../intrinsics/fn.copy_nonoverlapping.html
/// [`Drop`]: ../ops/trait.Drop.html
@ -985,6 +1023,9 @@ impl<T> ManuallyDrop<T> {
///
/// This function semantically moves out the contained value without preventing further usage.
/// It is up to the user of this method to ensure that this container is not used again.
///
/// [`ManuallyDrop::drop`]: #method.drop
/// [`ManuallyDrop::into_inner`]: #method.into_inner
#[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"]
#[unstable(feature = "manually_drop_take", issue = "55422")]
#[inline]
@ -1119,7 +1160,7 @@ impl<T> MaybeUninit<T> {
/// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
/// state, otherwise this will immediately cause undefined behavior.
// FIXME(#53491): We currently rely on the above being incorrect, i.e., we have references
// to uninitialized data (e.g. in `libcore/fmt/float.rs`). We should make
// to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make
// a final decision about the rules before stabilization.
#[unstable(feature = "maybe_uninit", issue = "53491")]
#[inline(always)]

View file

@ -11,14 +11,23 @@
//! Exposes the NonZero lang item which provides optimization hints.
use ops::{CoerceUnsized, DispatchFromDyn};
use marker::Freeze;
/// A wrapper type for raw pointers and integers that will never be
/// NULL or 0, which might allow certain optimizations.
#[rustc_layout_scalar_valid_range_start(1)]
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[derive(Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
pub(crate) struct NonZero<T>(pub(crate) T);
pub(crate) struct NonZero<T: Freeze>(pub(crate) T);
impl<T: CoerceUnsized<U>, U> CoerceUnsized<NonZero<U>> for NonZero<T> {}
// Do not call `T::clone` as theoretically it could turn the field into `0`
// invalidating `NonZero`'s invariant.
impl<T: Copy + Freeze> Clone for NonZero<T> {
fn clone(&self) -> Self {
unsafe { NonZero(self.0) }
}
}
impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<NonZero<U>> for NonZero<T> {}
impl<T: CoerceUnsized<U> + Freeze, U: Freeze> CoerceUnsized<NonZero<U>> for NonZero<T> {}
impl<T: DispatchFromDyn<U> + Freeze, U: Freeze> DispatchFromDyn<NonZero<U>> for NonZero<T> {}

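For illustration (not part of this commit), the "certain optimizations" are niche layout optimizations, observable through the public `NonZero*` wrappers built on this type:

use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // `None` can be represented by the forbidden value 0, so the `Option`
    // costs no space over the bare integer.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
    // Without a niche, `Option` needs room for a discriminant.
    assert_eq!(size_of::<Option<u32>>(), 8);
}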
View file

@ -183,7 +183,7 @@ macro_rules! define_bignum {
let nonzero = &digits[..end];
if nonzero.is_empty() {
// There are no non-zero digits, i.e. the number is zero.
// There are no non-zero digits, i.e., the number is zero.
return 0;
}
// This could be optimized with leading_zeros() and bit shifts, but that's

View file

@ -61,9 +61,9 @@ mod fpu_precision {
///
/// The only field which is relevant for the following code is PC, Precision Control. This
/// field determines the precision of the operations performed by the FPU. It can be set to:
/// - 0b00, single precision i.e. 32-bits
/// - 0b10, double precision i.e. 64-bits
/// - 0b11, double extended precision i.e. 80-bits (default state)
/// - 0b00, single precision i.e., 32-bits
/// - 0b10, double precision i.e., 64-bits
/// - 0b11, double extended precision i.e., 80-bits (default state)
/// The 0b01 value is reserved and should not be used.
pub struct FPUControlWord(u16);

View file

@ -349,7 +349,7 @@ pub fn prev_float<T: RawFloat>(x: T) -> T {
}
// Find the smallest floating point number strictly larger than the argument.
// This operation is saturating, i.e. next_float(inf) == inf.
// This operation is saturating, i.e., next_float(inf) == inf.
// Unlike most code in this module, this function does handle zero, subnormals, and infinities.
// However, like all other code here, it does not deal with NaN and negative numbers.
pub fn next_float<T: RawFloat>(x: T) -> T {

View file

@ -23,7 +23,7 @@ representation `V = 0.d[0..n-1] * 10^k` such that:
- `d[0]` is non-zero.
- It's correctly rounded when parsed back: `v - minus < V < v + plus`.
Furthermore it is shortest such one, i.e. there is no representation
Furthermore it is shortest such one, i.e., there is no representation
with less than `n` digits that is correctly rounded.
- It's closest to the original value: `abs(V - v) <= 10^(k-n) / 2`. Note that
@ -398,7 +398,7 @@ fn determine_sign(sign: Sign, decoded: &FullDecoded, negative: bool) -> &'static
/// given number of fractional digits. The result is stored to the supplied parts
/// array while utilizing given byte buffer as a scratch. `upper` is currently
/// unused but left for the future decision to change the case of non-finite values,
/// i.e. `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
/// i.e., `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
/// (which can be an empty string if no sign is rendered).
///
/// `format_shortest` should be the underlying digit-generation function.
@ -591,7 +591,7 @@ pub fn to_exact_exp_str<'a, T, F>(mut format_exact: F, v: T,
/// given number of fractional digits. The result is stored to the supplied parts
/// array while utilizing given byte buffer as a scratch. `upper` is currently
/// unused but left for the future decision to change the case of non-finite values,
/// i.e. `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
/// i.e., `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
/// (which can be an empty string if no sign is rendered).
///
/// `format_exact` should be the underlying digit-generation function.

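For illustration (not part of this commit), the "shortest correctly rounded" contract described at the top of this file is what the default `{}` formatting of floats relies on:

fn main() {
    // 0.1 has no exact binary representation, but the shortest digit string
    // that parses back to the same f64 is simply "0.1".
    assert_eq!(format!("{}", 0.1_f64), "0.1");
    // The stored value itself has many more digits, visible with explicit precision.
    println!("{:.20}", 0.1_f64); // prints 0.10000000000000000555
}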
View file

@ -81,11 +81,11 @@ pub fn format_shortest(d: &Decoded, buf: &mut [u8]) -> (/*#digits*/ usize, /*exp
// - followed by `(mant + 2 * plus) * 2^exp` in the original type.
//
// obviously, `minus` and `plus` cannot be zero. (for infinities, we use out-of-range values.)
// also we assume that at least one digit is generated, i.e. `mant` cannot be zero too.
// also we assume that at least one digit is generated, i.e., `mant` cannot be zero too.
//
// this also means that any number between `low = (mant - minus) * 2^exp` and
// `high = (mant + plus) * 2^exp` will map to this exact floating point number,
// with bounds included when the original mantissa was even (i.e. `!mant_was_odd`).
// with bounds included when the original mantissa was even (i.e., `!mant_was_odd`).
assert!(d.mant > 0);
assert!(d.minus > 0);
@ -172,7 +172,7 @@ pub fn format_shortest(d: &Decoded, buf: &mut [u8]) -> (/*#digits*/ usize, /*exp
// - `high - v = plus / scale * 10^(k-n)`
//
// assume that `d[0..n-1]` is the shortest representation between `low` and `high`,
// i.e. `d[0..n-1]` satisfies both of the following but `d[0..n-2]` doesn't:
// i.e., `d[0..n-1]` satisfies both of the following but `d[0..n-2]` doesn't:
// - `low < d[0..n-1] * 10^(k-n) < high` (bijectivity: digits round to `v`); and
// - `abs(v / 10^(k-n) - d[0..n-1]) <= 1/2` (the last digit is correct).
//
@ -304,7 +304,7 @@ pub fn format_exact(d: &Decoded, buf: &mut [u8], limit: i16) -> (/*#digits*/ usi
// rounding up if we stop in the middle of digits
// if the following digits are exactly 5000..., check the prior digit and try to
// round to even (i.e. avoid rounding up when the prior digit is even).
// round to even (i.e., avoid rounding up when the prior digit is even).
let order = mant.cmp(scale.mul_small(5));
if order == Ordering::Greater || (order == Ordering::Equal &&
(len == 0 || buf[len-1] & 1 == 1)) {

View file

@ -242,7 +242,7 @@ pub fn format_shortest_opt(d: &Decoded,
//
// find the digit length `kappa` between `(minus1, plus1)` as per Theorem 6.2.
// Theorem 6.2 can be adopted to exclude `x` by requiring `y mod 10^k < y - x` instead.
// (e.g. `x` = 32000, `y` = 32777; `kappa` = 2 since `y mod 10^3 = 777 < y - x = 777`.)
// (e.g., `x` = 32000, `y` = 32777; `kappa` = 2 since `y mod 10^3 = 777 < y - x = 777`.)
// the algorithm relies on the later verification phase to exclude `y`.
let delta1 = plus1 - minus1;
// let delta1int = (delta1 >> e) as usize; // only for explanation
@ -362,19 +362,19 @@ pub fn format_shortest_opt(d: &Decoded,
// proceed, but we then have at least one valid representation known to be closest to
// `v + 1 ulp` anyway. we will denote them as TC1 through TC3 for brevity.
//
// TC1: `w(n) <= v + 1 ulp`, i.e. this is the last repr that can be the closest one.
// TC1: `w(n) <= v + 1 ulp`, i.e., this is the last repr that can be the closest one.
// this is equivalent to `plus1 - w(n) = plus1w(n) >= plus1 - (v + 1 ulp) = plus1v_up`.
// combined with TC2 (which checks if `w(n+1)` is valid), this prevents the possible
// overflow on the calculation of `plus1w(n)`.
//
// TC2: `w(n+1) < minus1`, i.e. the next repr definitely does not round to `v`.
// TC2: `w(n+1) < minus1`, i.e., the next repr definitely does not round to `v`.
// this is equivalent to `plus1 - w(n) + 10^kappa = plus1w(n) + 10^kappa >
// plus1 - minus1 = threshold`. the left hand side can overflow, but we know
// `threshold > plus1v`, so if TC1 is false, `threshold - plus1w(n) >
// threshold - (plus1v - 1 ulp) > 1 ulp` and we can safely test if
// `threshold - plus1w(n) < 10^kappa` instead.
//
// TC3: `abs(w(n) - (v + 1 ulp)) <= abs(w(n+1) - (v + 1 ulp))`, i.e. the next repr is
// TC3: `abs(w(n) - (v + 1 ulp)) <= abs(w(n+1) - (v + 1 ulp))`, i.e., the next repr is
// no closer to `v + 1 ulp` than the current repr. given `z(n) = plus1v_up - plus1w(n)`,
// this becomes `abs(z(n)) <= abs(z(n+1))`. again assuming that TC1 is false, we have
// `z(n) > 0`. we have two cases to consider:
@ -384,7 +384,7 @@ pub fn format_shortest_opt(d: &Decoded,
// - when `z(n+1) < 0`:
// - TC3a: the precondition is `plus1v_up < plus1w(n) + 10^kappa`. assuming TC2 is
// false, `threshold >= plus1w(n) + 10^kappa` so it cannot overflow.
// - TC3b: TC3 becomes `z(n) <= -z(n+1)`, i.e. `plus1v_up - plus1w(n) >=
// - TC3b: TC3 becomes `z(n) <= -z(n+1)`, i.e., `plus1v_up - plus1w(n) >=
// plus1w(n+1) - plus1v_up = plus1w(n) + 10^kappa - plus1v_up`. the negated TC1
// gives `plus1v_up > plus1w(n)`, so it cannot overflow or underflow when
// combined with TC3a.
@ -414,7 +414,7 @@ pub fn format_shortest_opt(d: &Decoded,
// now we have the closest representation to `v` between `plus1` and `minus1`.
// this is too liberal, though, so we reject any `w(n)` not between `plus0` and `minus0`,
// i.e. `plus1 - plus1w(n) <= minus0` or `plus1 - plus1w(n) >= plus0`. we utilize the facts
// i.e., `plus1 - plus1w(n) <= minus0` or `plus1 - plus1w(n) >= plus0`. we utilize the facts
// that `threshold = plus1 - minus1` and `plus1 - plus0 = minus0 - minus1 = 2 ulp`.
if 2 * ulp <= plus1w && plus1w <= threshold - 4 * ulp {
Some((buf.len(), exp))
@ -675,7 +675,7 @@ pub fn format_exact_opt(d: &Decoded, buf: &mut [u8], limit: i16)
return Some((len, exp));
}
// otherwise we are doomed (i.e. some values between `v - 1 ulp` and `v + 1 ulp` are
// otherwise we are doomed (i.e., some values between `v - 1 ulp` and `v + 1 ulp` are
// rounding down and others are rounding up) and give up.
None
}

Some files were not shown because too many files have changed in this diff.