Auto merge of #113162 - matthiaskrgr:rollup-fct3wj7, r=matthiaskrgr
Rollup of 7 pull requests

Successful merges:

 - #111322 (Support for native WASM exceptions)
 - #112086 (resolve: Remove artificial import ambiguity errors)
 - #112234 (refactor `tool_doc!`)
 - #112300 (Convert `run-make/coverage-reports` tests to use a custom compiletest mode)
 - #112795 (Migrate some rustc_builtin_macros to SessionDiagnostic)
 - #113144 (Make the `Elaboratable` trait take clauses)
 - #113161 (Fix type privacy lints error message)

r? `@ghost`
`@rustbot` modify labels: rollup
commit 97279e91d8
171 changed files with 1053 additions and 790 deletions
@@ -1,4 +1,5 @@
 builtin_macros_alloc_error_must_be_fn = alloc_error_handler must be a function
+builtin_macros_alloc_must_statics = allocators must be statics
 
 builtin_macros_asm_clobber_abi = clobber_abi
 builtin_macros_asm_clobber_no_reg = asm with `clobber_abi` must specify explicit registers for outputs
@@ -56,6 +57,9 @@ builtin_macros_bad_derive_target = `derive` may only be applied to `struct`s, `e
     .label = not applicable here
     .label2 = not a `struct`, `enum` or `union`
 
+builtin_macros_bench_sig = functions used as benches must have signature `fn(&mut Bencher) -> impl Termination`
+
 builtin_macros_cannot_derive_union = this trait cannot be derived for unions
 
 builtin_macros_cfg_accessible_has_args = `cfg_accessible` path cannot accept arguments
@@ -84,6 +88,7 @@ builtin_macros_concat_bytes_non_u8 = numeric literal is not a `u8`
 builtin_macros_concat_bytes_oob = numeric literal is out of bounds
 
 builtin_macros_concat_bytestr = cannot concatenate a byte string literal
+builtin_macros_concat_c_str_lit = cannot concatenate a C string literal
 
 builtin_macros_concat_idents_ident_args = `concat_idents!()` requires ident args
 
@@ -111,6 +116,10 @@ builtin_macros_env_takes_args = `env!()` takes 1 or 2 arguments
 
 builtin_macros_expected_one_cfg_pattern = expected 1 cfg-pattern
 
+builtin_macros_expected_register_class_or_explicit_register = expected register class or explicit register
+
+builtin_macros_export_macro_rules = cannot export macro_rules! macros from a `proc-macro` crate type currently
+
 builtin_macros_format_duplicate_arg = duplicate argument named `{$ident}`
     .label1 = previously here
     .label2 = duplicate argument
@@ -158,6 +167,8 @@ builtin_macros_format_unused_args = multiple unused formatting arguments
 
 builtin_macros_global_asm_clobber_abi = `clobber_abi` cannot be used with `global_asm!`
 
+builtin_macros_invalid_crate_attribute = invalid crate attribute
+
 builtin_macros_multiple_default_attrs = multiple `#[default]` attributes
     .note = only one `#[default]` attribute is needed
     .label = `#[default]` used here
@@ -177,6 +188,8 @@ builtin_macros_no_default_variant = no default declared
     .help = make a unit variant default by placing `#[default]` above it
     .suggestion = make `{$ident}` default
 
+builtin_macros_non_abi = at least one abi must be provided as an argument to `clobber_abi`
+
 builtin_macros_non_exhaustive_default = default variant must be exhaustive
     .label = declared `#[non_exhaustive]` here
     .help = consider a manual implementation of `Default`
@@ -184,12 +197,20 @@ builtin_macros_non_exhaustive_default = default variant must be exhaustive
 builtin_macros_non_unit_default = the `#[default]` attribute may only be used on unit enum variants
     .help = consider a manual implementation of `Default`
 
+builtin_macros_proc_macro = `proc-macro` crate types currently cannot export any items other than functions tagged with `#[proc_macro]`, `#[proc_macro_derive]`, or `#[proc_macro_attribute]`
+
 builtin_macros_requires_cfg_pattern =
     macro requires a cfg-pattern as an argument
     .label = cfg-pattern required
 
+builtin_macros_should_panic = functions using `#[should_panic]` must return `()`
+
 builtin_macros_sugg = consider using a positional formatting argument instead
 
+builtin_macros_test_arg_non_lifetime = functions used as tests can not have any non-lifetime generic parameters
+
+builtin_macros_test_args = functions used as tests can not have any arguments
+
 builtin_macros_test_bad_fn = {$kind} functions cannot be used for tests
     .label = `{$kind}` because of this
 
@@ -198,6 +219,10 @@ builtin_macros_test_case_non_item = `#[test_case]` attribute is only allowed on
 builtin_macros_test_runner_invalid = `test_runner` argument must be a path
 builtin_macros_test_runner_nargs = `#![test_runner(..)]` accepts exactly 1 argument
 
+builtin_macros_tests_not_support = building tests with panic=abort is not supported without `-Zpanic_abort_tests`
+
+builtin_macros_trace_macros = trace_macros! accepts only `true` or `false`
+
 builtin_macros_unexpected_lit = expected path to a trait, found literal
     .label = not a trait
     .str_lit = try using `#[derive({$sym})]`
@@ -371,11 +371,7 @@ fn parse_clobber_abi<'a>(p: &mut Parser<'a>, args: &mut AsmArgs) -> PResult<'a,
     p.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
 
     if p.eat(&token::CloseDelim(Delimiter::Parenthesis)) {
-        let err = p.sess.span_diagnostic.struct_span_err(
-            p.token.span,
-            "at least one abi must be provided as an argument to `clobber_abi`",
-        );
-        return Err(err);
+        return Err(p.sess.span_diagnostic.create_err(errors::NonABI { span: p.token.span }));
     }
 
     let mut new_abis = Vec::new();
@@ -428,9 +424,9 @@ fn parse_reg<'a>(
             ast::InlineAsmRegOrRegClass::Reg(symbol)
         }
         _ => {
-            return Err(
-                p.struct_span_err(p.token.span, "expected register class or explicit register")
-            );
+            return Err(p.sess.create_err(errors::ExpectedRegisterClassOrExplicitRegister {
+                span: p.token.span,
+            }));
         }
     };
     p.bump();
@@ -1,5 +1,6 @@
 //! Attributes injected into the crate root from command line using `-Z crate-attr`.
 
+use crate::errors;
 use rustc_ast::attr::mk_attr;
 use rustc_ast::token;
 use rustc_ast::{self as ast, AttrItem, AttrStyle};
@@ -24,7 +25,9 @@ pub fn inject(krate: &mut ast::Crate, parse_sess: &ParseSess, attrs: &[String])
         };
         let end_span = parser.token.span;
         if parser.token != token::Eof {
-            parse_sess.span_diagnostic.span_err(start_span.to(end_span), "invalid crate attribute");
+            parse_sess
+                .span_diagnostic
+                .emit_err(errors::InvalidCrateAttr { span: start_span.to(end_span) });
             continue;
         }
 
@@ -33,7 +33,7 @@ pub fn expand_concat(
                 accumulator.push_str(&b.to_string());
             }
             Ok(ast::LitKind::CStr(..)) => {
-                cx.span_err(e.span, "cannot concatenate a C string literal");
+                cx.emit_err(errors::ConcatCStrLit{ span: e.span});
                 has_errors = true;
             }
             Ok(ast::LitKind::Byte(..) | ast::LitKind::ByteStr(..)) => {
@@ -21,7 +21,7 @@ fn invalid_type_err(
         Ok(ast::LitKind::CStr(_, _)) => {
             // FIXME(c_str_literals): should concatenation of C string literals
             // include the null bytes in the end?
-            cx.span_err(span, "cannot concatenate C string literals");
+            cx.emit_err(errors::ConcatCStrLit { span: span });
         }
         Ok(ast::LitKind::Char(_)) => {
             let sugg =
@@ -87,6 +87,83 @@ pub(crate) struct ConcatBytestr {
     pub(crate) span: Span,
 }
 
+#[derive(Diagnostic)]
+#[diag(builtin_macros_concat_c_str_lit)]
+pub(crate) struct ConcatCStrLit {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_export_macro_rules)]
+pub(crate) struct ExportMacroRules {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_proc_macro)]
+pub(crate) struct ProcMacro {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_invalid_crate_attribute)]
+pub(crate) struct InvalidCrateAttr {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_non_abi)]
+pub(crate) struct NonABI {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_trace_macros)]
+pub(crate) struct TraceMacros {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_bench_sig)]
+pub(crate) struct BenchSig {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_test_arg_non_lifetime)]
+pub(crate) struct TestArgNonLifetime {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_should_panic)]
+pub(crate) struct ShouldPanic {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_test_args)]
+pub(crate) struct TestArgs {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_alloc_must_statics)]
+pub(crate) struct AllocMustStatics {
+    #[primary_span]
+    pub(crate) span: Span,
+}
+
 #[derive(Diagnostic)]
 #[diag(builtin_macros_concat_bytes_invalid)]
 pub(crate) struct ConcatBytesInvalid {
@@ -201,6 +278,10 @@ pub(crate) struct BadDeriveTarget {
     pub(crate) item: Span,
 }
 
+#[derive(Diagnostic)]
+#[diag(builtin_macros_tests_not_support)]
+pub(crate) struct TestsNotSupport {}
+
 #[derive(Diagnostic)]
 #[diag(builtin_macros_unexpected_lit, code = "E0777")]
 pub(crate) struct BadDeriveLit {
@@ -732,3 +813,10 @@ pub(crate) struct TestRunnerNargs {
     #[primary_span]
     pub(crate) span: Span,
 }
+
+#[derive(Diagnostic)]
+#[diag(builtin_macros_expected_register_class_or_explicit_register)]
+pub(crate) struct ExpectedRegisterClassOrExplicitRegister {
+    #[primary_span]
+    pub(crate) span: Span,
+}
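All of the new structs above follow one pattern: a `#[diag(...)]` slug naming a Fluent message from the list earlier in this diff, plus a `#[primary_span]` field, and the call sites construct the struct and hand it to `emit_err`/`create_err` instead of formatting a string in place. The following is a minimal, self-contained mock of that pattern — the `Span`, `Diagnostic` and `Handler` types here are simplified stand-ins, not the real `rustc_errors`/`rustc_macros` API — meant only to show how a slug-keyed struct replaces an ad-hoc `span_err` string:

```rust
// Simplified stand-ins for rustc's Span, Handler and the Diagnostic derive.
#[derive(Debug, Clone, Copy)]
struct Span {
    lo: usize,
    hi: usize,
}

// In rustc this trait is effectively provided by `#[derive(Diagnostic)]`;
// here we implement it by hand to show what the derive supplies conceptually.
trait Diagnostic {
    /// Fluent slug identifying the message, e.g. `builtin_macros_non_abi`.
    fn slug(&self) -> &'static str;
    /// Primary span the error points at.
    fn primary_span(&self) -> Span;
}

struct NonAbi {
    span: Span,
}

impl Diagnostic for NonAbi {
    fn slug(&self) -> &'static str {
        "builtin_macros_non_abi"
    }
    fn primary_span(&self) -> Span {
        self.span
    }
}

struct Handler;

impl Handler {
    // Mock of `emit_err`: the message text is looked up by slug in a table
    // instead of being formatted at the call site.
    fn emit_err<D: Diagnostic>(&self, diag: D) {
        let Span { lo, hi } = diag.primary_span();
        let msg = match diag.slug() {
            "builtin_macros_non_abi" => {
                "at least one abi must be provided as an argument to `clobber_abi`"
            }
            other => other,
        };
        eprintln!("error at {lo}..{hi}: {msg}");
    }
}

fn main() {
    let handler = Handler;
    // Call sites build a typed struct rather than a string.
    handler.emit_err(NonAbi { span: Span { lo: 10, hi: 21 } });
}
```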
@@ -1,5 +1,6 @@
 use crate::util::check_builtin_macro_attribute;
 
+use crate::errors;
 use rustc_ast::expand::allocator::{
     global_fn_name, AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS,
 };
@@ -34,7 +35,7 @@ pub fn expand(
     {
         (item, true, ecx.with_def_site_ctxt(ty.span))
     } else {
-        ecx.sess.parse_sess.span_diagnostic.span_err(item.span(), "allocators must be statics");
+        ecx.sess.parse_sess.span_diagnostic.emit_err(errors::AllocMustStatics{span: item.span()});
         return vec![orig_item];
     };
 
@@ -1,3 +1,4 @@
+use crate::errors;
 use rustc_ast::ptr::P;
 use rustc_ast::visit::{self, Visitor};
 use rustc_ast::{self as ast, attr, NodeId};
@@ -83,12 +84,7 @@ pub fn inject(
 impl<'a> CollectProcMacros<'a> {
     fn check_not_pub_in_root(&self, vis: &ast::Visibility, sp: Span) {
         if self.is_proc_macro_crate && self.in_root && vis.kind.is_pub() {
-            self.handler.span_err(
-                sp,
-                "`proc-macro` crate types currently cannot export any items other \
-                 than functions tagged with `#[proc_macro]`, `#[proc_macro_derive]`, \
-                 or `#[proc_macro_attribute]`",
-            );
+            self.handler.emit_err(errors::ProcMacro { span: sp });
         }
     }
 
@@ -157,9 +153,9 @@ impl<'a> Visitor<'a> for CollectProcMacros<'a> {
     fn visit_item(&mut self, item: &'a ast::Item) {
         if let ast::ItemKind::MacroDef(..) = item.kind {
             if self.is_proc_macro_crate && attr::contains_name(&item.attrs, sym::macro_export) {
-                let msg =
-                    "cannot export macro_rules! macros from a `proc-macro` crate type currently";
-                self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
+                self.handler.emit_err(errors::ExportMacroRules {
+                    span: self.source_map.guess_head_span(item.span),
+                });
             }
         }
 
@@ -576,12 +576,7 @@ fn check_bench_signature(
     // N.B., inadequate check, but we're running
     // well before resolve, can't get too deep.
     if f.sig.decl.inputs.len() != 1 {
-        return Err(cx.sess.parse_sess.span_diagnostic.span_err(
-            i.span,
-            "functions used as benches must have \
-             signature `fn(&mut Bencher) -> impl Termination`",
-        ));
+        return Err(cx.sess.parse_sess.span_diagnostic.emit_err(errors::BenchSig { span: i.span }));
     }
 
     Ok(())
 }
@@ -63,10 +63,7 @@ pub fn inject(krate: &mut ast::Crate, sess: &Session, resolver: &mut dyn Resolve
             // Silently allow compiling with panic=abort on these platforms,
             // but with old behavior (abort if a test fails).
         } else {
-            span_diagnostic.err(
-                "building tests with panic=abort is not supported \
-                 without `-Zpanic_abort_tests`",
-            );
+            span_diagnostic.emit_err(errors::TestsNotSupport {});
         }
         PanicStrategy::Unwind
     }
@@ -1,3 +1,4 @@
+use crate::errors;
 use rustc_ast::tokenstream::{TokenStream, TokenTree};
 use rustc_expand::base::{self, ExtCtxt};
 use rustc_span::symbol::kw;
@@ -20,7 +21,7 @@ pub fn expand_trace_macros(
     };
     err |= cursor.next().is_some();
     if err {
-        cx.span_err(sp, "trace_macros! accepts only `true` or `false`")
+        cx.emit_err(errors::TraceMacros { span: sp });
     } else {
         cx.set_trace_macros(value);
     }
@@ -8,7 +8,7 @@ use crate::llvm_util;
 use crate::type_::Type;
 use crate::value::Value;
 
-use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
@@ -532,19 +532,28 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         if let Some(llpersonality) = self.eh_personality.get() {
             return llpersonality;
         }
 
+        let name = if wants_msvc_seh(self.sess()) {
+            Some("__CxxFrameHandler3")
+        } else if wants_wasm_eh(self.sess()) {
+            // LLVM specifically tests for the name of the personality function
+            // There is no need for this function to exist anywhere, it will
+            // not be called. However, its name has to be "__gxx_wasm_personality_v0"
+            // for native wasm exceptions.
+            Some("__gxx_wasm_personality_v0")
+        } else {
+            None
+        };
+
         let tcx = self.tcx;
         let llfn = match tcx.lang_items().eh_personality() {
-            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+            Some(def_id) if name.is_none() => self.get_fn_addr(
                 ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
                     .unwrap()
                     .unwrap(),
             ),
             _ => {
-                let name = if wants_msvc_seh(self.sess()) {
-                    "__CxxFrameHandler3"
-                } else {
-                    "rust_eh_personality"
-                };
+                let name = name.unwrap_or("rust_eh_personality");
                 if let Some(llfn) = self.get_declared_value(name) {
                     llfn
                 } else {
@@ -662,6 +671,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         let t_f32 = self.type_f32();
         let t_f64 = self.type_f64();
         let t_metadata = self.type_metadata();
+        let t_token = self.type_token();
+
+        ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p);
+        ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
 
         ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
         ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
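For reference, the personality-symbol choice introduced above boils down to a small pure function. The following is a condensed, dependency-free restatement (with the `Session` queries replaced by two plain booleans), not the compiler's actual code; the three symbol names come straight from the diff:

```rust
/// Condensed restatement of the personality-symbol selection. `None` means
/// "use the `eh_personality` lang item if the crate defines one".
fn personality_symbol(wants_msvc_seh: bool, wants_wasm_eh: bool) -> Option<&'static str> {
    if wants_msvc_seh {
        Some("__CxxFrameHandler3")
    } else if wants_wasm_eh {
        // LLVM keys native wasm EH lowering off this exact symbol name; the
        // function itself never has to exist or be called.
        Some("__gxx_wasm_personality_v0")
    } else {
        None
    }
}

fn main() {
    assert_eq!(personality_symbol(true, false), Some("__CxxFrameHandler3"));
    assert_eq!(personality_symbol(false, true), Some("__gxx_wasm_personality_v0"));
    // Fallback when neither applies and no lang item is found.
    let fallback = personality_symbol(false, false).unwrap_or("rust_eh_personality");
    assert_eq!(fallback, "rust_eh_personality");
}
```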
@@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::va_arg::emit_va_arg;
 use crate::value::Value;
 
-use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
 use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
 use rustc_codegen_ssa::mir::operand::OperandRef;
@@ -452,6 +452,8 @@ fn try_intrinsic<'ll>(
         bx.store(bx.const_i32(0), dest, ret_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, try_func, data, catch_func, dest);
+    } else if wants_wasm_eh(bx.sess()) {
+        codegen_wasm_try(bx, try_func, data, catch_func, dest);
     } else if bx.sess().target.os == "emscripten" {
         codegen_emcc_try(bx, try_func, data, catch_func, dest);
     } else {
@@ -610,6 +612,80 @@ fn codegen_msvc_try<'ll>(
     bx.store(ret, dest, i32_align);
 }
 
+// WASM's definition of the `rust_try` function.
+fn codegen_wasm_try<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+        bx.set_personality_fn(bx.eh_personality());
+
+        let normal = bx.append_sibling_block("normal");
+        let catchswitch = bx.append_sibling_block("catchswitch");
+        let catchpad = bx.append_sibling_block("catchpad");
+        let caught = bx.append_sibling_block("caught");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+
+        // We're generating an IR snippet that looks like:
+        //
+        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
+        //      %slot = alloca i8*
+        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
+        //
+        //   normal:
+        //      ret i32 0
+        //
+        //   catchswitch:
+        //      %cs = catchswitch within none [%catchpad] unwind to caller
+        //
+        //   catchpad:
+        //      %tok = catchpad within %cs [null]
+        //      %ptr = call @llvm.wasm.get.exception(token %tok)
+        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
+        //      call %catch_func(%data, %ptr)
+        //      catchret from %tok to label %caught
+        //
+        //   caught:
+        //      ret i32 1
+        //   }
+        //
+        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+
+        bx.switch_to_block(normal);
+        bx.ret(bx.const_i32(0));
+
+        bx.switch_to_block(catchswitch);
+        let cs = bx.catch_switch(None, None, &[catchpad]);
+
+        bx.switch_to_block(catchpad);
+        let null = bx.const_null(bx.type_i8p());
+        let funclet = bx.catch_pad(cs, &[null]);
+
+        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
+        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
+
+        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.catch_ret(&funclet, caught);
+
+        bx.switch_to_block(caught);
+        bx.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
 // Definition of the standard `try` function for Rust using the GNU-like model
 // of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
 // instructions).
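`codegen_wasm_try` builds the same `rust_try` shape as the MSVC and GNU paths: call `try_func(data)`, return 0 if it returns normally, otherwise invoke `catch_func(data, exception)` and return 1. As a rough analogy of that contract in plain Rust — using `std::panic::catch_unwind` rather than wasm exceptions, so this illustrates the calling convention, not the generated IR:

```rust
use std::panic::{self, AssertUnwindSafe};

/// Rough analogy of the contract the generated `rust_try` implements: run
/// `try_func`, return 0 on success; on unwind, run `catch_func` with the
/// payload and return 1. Uses std's catch_unwind, not wasm EH.
fn rust_try_like<T>(
    data: &mut T,
    try_func: impl FnOnce(&mut T),
    catch_func: impl FnOnce(&mut T, Box<dyn std::any::Any + Send>),
) -> i32 {
    match panic::catch_unwind(AssertUnwindSafe(|| try_func(&mut *data))) {
        Ok(()) => 0,
        Err(payload) => {
            catch_func(data, payload);
            1
        }
    }
}

fn main() {
    // Silence the default panic message so the example output stays clean.
    panic::set_hook(Box::new(|_| {}));

    let mut log = Vec::new();
    let ret = rust_try_like(
        &mut log,
        |log| {
            log.push("about to panic");
            panic!("boom");
        },
        |log, _payload| log.push("caught"),
    );
    assert_eq!(ret, 1);
    assert_eq!(log, ["about to panic", "caught"]);
}
```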
@@ -1071,6 +1071,7 @@ extern "C" {
 
     // Operations on other types
     pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMTokenTypeInContext(C: &Context) -> &Type;
     pub fn LLVMMetadataTypeInContext(C: &Context) -> &Type;
 
     // Operations on all values
@@ -52,6 +52,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
     }
 
+    pub(crate) fn type_token(&self) -> &'ll Type {
+        unsafe { llvm::LLVMTokenTypeInContext(self.llcx) }
+    }
+
     pub(crate) fn type_metadata(&self) -> &'ll Type {
         unsafe { llvm::LLVMMetadataTypeInContext(self.llcx) }
     }
@@ -357,6 +357,13 @@ pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     }
 }
 
+// Returns `true` if this session's target will use native wasm
+// exceptions. This means that the VM does the unwinding for
+// us
+pub fn wants_wasm_eh(sess: &Session) -> bool {
+    sess.target.is_like_wasm && sess.target.os != "emscripten"
+}
+
 /// Returns `true` if this session's target will use SEH-based unwinding.
 ///
 /// This is only true for MSVC targets, and even then the 64-bit MSVC target
@@ -366,6 +373,13 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
     sess.target.is_like_msvc
 }
 
+/// Returns `true` if this session's target requires the new exception
+/// handling LLVM IR instructions (catchpad / cleanuppad / ... instead
+/// of landingpad)
+pub fn wants_new_eh_instructions(sess: &Session) -> bool {
+    wants_wasm_eh(sess) || wants_msvc_seh(sess)
+}
+
 pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     dst: Bx::Value,
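A minimal self-contained sketch of how the three predicates relate, with a simplified `TargetInfo` standing in for the real `Session`/`Target` types (the field names here are illustrative, not rustc's):

```rust
/// Simplified stand-in for the target properties the predicates consult.
struct TargetInfo {
    is_like_wasm: bool,
    is_like_msvc: bool,
    os: &'static str,
}

fn wants_wasm_eh(t: &TargetInfo) -> bool {
    // Native wasm exceptions: the VM unwinds for us, but emscripten keeps
    // its own JS-based scheme.
    t.is_like_wasm && t.os != "emscripten"
}

fn wants_msvc_seh(t: &TargetInfo) -> bool {
    t.is_like_msvc
}

/// Both SEH and native wasm EH are lowered with the funclet-style
/// instructions (catchswitch/catchpad/cleanuppad) instead of landingpad.
fn wants_new_eh_instructions(t: &TargetInfo) -> bool {
    wants_wasm_eh(t) || wants_msvc_seh(t)
}

fn main() {
    let wasm = TargetInfo { is_like_wasm: true, is_like_msvc: false, os: "unknown" };
    let emscripten = TargetInfo { is_like_wasm: true, is_like_msvc: false, os: "emscripten" };
    let msvc = TargetInfo { is_like_wasm: false, is_like_msvc: true, os: "windows" };
    let linux = TargetInfo { is_like_wasm: false, is_like_msvc: false, os: "linux" };
    assert!(wants_new_eh_instructions(&wasm));
    assert!(!wants_new_eh_instructions(&emscripten));
    assert!(wants_new_eh_instructions(&msvc));
    assert!(!wants_new_eh_instructions(&linux));
}
```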
@@ -79,8 +79,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             lltarget = fx.landing_pad_for(target);
         }
         if is_cleanupret {
-            // MSVC cross-funclet jump - need a trampoline
-            debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+            // Cross-funclet jump - need a trampoline
+            debug_assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
             debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
             let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
             let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
@@ -177,9 +177,16 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             mir::UnwindAction::Continue => None,
             mir::UnwindAction::Unreachable => None,
             mir::UnwindAction::Terminate => {
-                if fx.mir[self.bb].is_cleanup && base::wants_msvc_seh(fx.cx.tcx().sess) {
-                    // SEH will abort automatically if an exception tries to
+                if fx.mir[self.bb].is_cleanup && base::wants_new_eh_instructions(fx.cx.tcx().sess) {
+                    // MSVC SEH will abort automatically if an exception tries to
                     // propagate out from cleanup.
+
+                    // FIXME(@mirkootter): For wasm, we currently do not support terminate during
+                    // cleanup, because this requires a few more changes: The current code
+                    // caches the `terminate_block` for each function; funclet based code - however -
+                    // requires a different terminate_block for each funclet
+                    // Until this is implemented, we just do not unwind inside cleanup blocks
+
                     None
                 } else {
                     Some(fx.terminate_block())
@@ -1528,7 +1535,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
     fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
         let llbb = self.llbb(bb);
-        if base::wants_msvc_seh(self.cx.sess()) {
+        if base::wants_new_eh_instructions(self.cx.sess()) {
             let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
             let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
             let funclet = cleanup_bx.cleanup_pad(None, &[]);
@@ -1587,6 +1594,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // } catch (...) {
         //     bar();
         // }
+        //
+        // which creates an IR snippet like
+        //
+        // cs_terminate:
+        //   %cs = catchswitch within none [%cp_terminate] unwind to caller
+        // cp_terminate:
+        //   %cp = catchpad within %cs [null, i32 64, null]
+        //   ...
 
         llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
         let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
@@ -179,7 +179,8 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         start_bx.set_personality_fn(cx.eh_personality());
     }
 
-    let cleanup_kinds = base::wants_msvc_seh(cx.tcx().sess).then(|| analyze::cleanup_kinds(&mir));
+    let cleanup_kinds =
+        base::wants_new_eh_instructions(cx.tcx().sess).then(|| analyze::cleanup_kinds(&mir));
 
     let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> =
         mir.basic_blocks
@@ -284,6 +284,7 @@ const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
     // tidy-alphabetical-start
    ("atomics", Some(sym::wasm_target_feature)),
    ("bulk-memory", Some(sym::wasm_target_feature)),
+    ("exception-handling", Some(sym::wasm_target_feature)),
    ("multivalue", Some(sym::wasm_target_feature)),
    ("mutable-globals", Some(sym::wasm_target_feature)),
    ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
@@ -80,14 +80,14 @@ pub struct Elaborator<'tcx, O> {
 pub trait Elaboratable<'tcx> {
     fn predicate(&self) -> ty::Predicate<'tcx>;
 
-    // Makes a new `Self` but with a different predicate.
-    fn child(&self, predicate: ty::Predicate<'tcx>) -> Self;
+    // Makes a new `Self` but with a different clause that comes from elaboration.
+    fn child(&self, clause: ty::Clause<'tcx>) -> Self;
 
-    // Makes a new `Self` but with a different predicate and a different cause
-    // code (if `Self` has one).
+    // Makes a new `Self` but with a different clause and a different cause
+    // code (if `Self` has one, such as [`PredicateObligation`]).
     fn child_with_derived_cause(
         &self,
-        predicate: ty::Predicate<'tcx>,
+        clause: ty::Clause<'tcx>,
         span: Span,
         parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
         index: usize,
@@ -99,18 +99,18 @@ impl<'tcx> Elaboratable<'tcx> for PredicateObligation<'tcx> {
         self.predicate
     }
 
-    fn child(&self, predicate: ty::Predicate<'tcx>) -> Self {
+    fn child(&self, clause: ty::Clause<'tcx>) -> Self {
         Obligation {
             cause: self.cause.clone(),
             param_env: self.param_env,
             recursion_depth: 0,
-            predicate,
+            predicate: clause.as_predicate(),
         }
     }
 
     fn child_with_derived_cause(
         &self,
-        predicate: ty::Predicate<'tcx>,
+        clause: ty::Clause<'tcx>,
         span: Span,
         parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
         index: usize,
@@ -123,7 +123,12 @@ impl<'tcx> Elaboratable<'tcx> for PredicateObligation<'tcx> {
                 span,
             }))
         });
-        Obligation { cause, param_env: self.param_env, recursion_depth: 0, predicate }
+        Obligation {
+            cause,
+            param_env: self.param_env,
+            recursion_depth: 0,
+            predicate: clause.as_predicate(),
+        }
     }
 }
 
@@ -132,18 +137,18 @@ impl<'tcx> Elaboratable<'tcx> for ty::Predicate<'tcx> {
         *self
     }
 
-    fn child(&self, predicate: ty::Predicate<'tcx>) -> Self {
-        predicate
+    fn child(&self, clause: ty::Clause<'tcx>) -> Self {
+        clause.as_predicate()
     }
 
     fn child_with_derived_cause(
         &self,
-        predicate: ty::Predicate<'tcx>,
+        clause: ty::Clause<'tcx>,
         _span: Span,
         _parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
         _index: usize,
     ) -> Self {
-        predicate
+        clause.as_predicate()
     }
 }
 
@@ -152,18 +157,18 @@ impl<'tcx> Elaboratable<'tcx> for (ty::Predicate<'tcx>, Span) {
         self.0
     }
 
-    fn child(&self, predicate: ty::Predicate<'tcx>) -> Self {
-        (predicate, self.1)
+    fn child(&self, clause: ty::Clause<'tcx>) -> Self {
+        (clause.as_predicate(), self.1)
     }
 
     fn child_with_derived_cause(
         &self,
-        predicate: ty::Predicate<'tcx>,
+        clause: ty::Clause<'tcx>,
         _span: Span,
         _parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
         _index: usize,
     ) -> Self {
-        (predicate, self.1)
+        (clause.as_predicate(), self.1)
     }
 }
 
@@ -172,18 +177,18 @@ impl<'tcx> Elaboratable<'tcx> for (ty::Clause<'tcx>, Span) {
         self.0.as_predicate()
     }
 
-    fn child(&self, predicate: ty::Predicate<'tcx>) -> Self {
-        (predicate.expect_clause(), self.1)
+    fn child(&self, clause: ty::Clause<'tcx>) -> Self {
+        (clause, self.1)
     }
 
     fn child_with_derived_cause(
         &self,
-        predicate: ty::Predicate<'tcx>,
+        clause: ty::Clause<'tcx>,
         _span: Span,
         _parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
         _index: usize,
     ) -> Self {
-        (predicate.expect_clause(), self.1)
+        (clause, self.1)
     }
 }
 
@@ -192,18 +197,18 @@ impl<'tcx> Elaboratable<'tcx> for ty::Clause<'tcx> {
         self.as_predicate()
     }
 
-    fn child(&self, predicate: ty::Predicate<'tcx>) -> Self {
-        predicate.expect_clause()
+    fn child(&self, clause: ty::Clause<'tcx>) -> Self {
+        clause
     }
 
     fn child_with_derived_cause(
         &self,
-        predicate: ty::Predicate<'tcx>,
+        clause: ty::Clause<'tcx>,
         _span: Span,
         _parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
         _index: usize,
     ) -> Self {
-        predicate.expect_clause()
+        clause
     }
 }
 
@@ -252,14 +257,13 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> {
                 };
 
                 let obligations =
-                    predicates.predicates.iter().enumerate().map(|(index, &(mut pred, span))| {
+                    predicates.predicates.iter().enumerate().map(|(index, &(mut clause, span))| {
                         // when parent predicate is non-const, elaborate it to non-const predicates.
                         if data.constness == ty::BoundConstness::NotConst {
-                            pred = pred.without_const(tcx);
+                            clause = clause.without_const(tcx);
                         }
                         elaboratable.child_with_derived_cause(
-                            pred.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref))
-                                .as_predicate(),
+                            clause.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
                             span,
                             bound_predicate.rebind(data),
                             index,
@@ -333,17 +337,15 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> {
                             if r.is_late_bound() {
                                 None
                             } else {
-                                Some(ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(
-                                    ty::OutlivesPredicate(r, r_min),
+                                Some(ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(
+                                    r, r_min,
                                 )))
                             }
                         }
 
                         Component::Param(p) => {
                             let ty = tcx.mk_ty_param(p.index, p.name);
-                            Some(ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(
-                                ty::OutlivesPredicate(ty, r_min),
-                            )))
+                            Some(ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate(ty, r_min)))
                         }
 
                         Component::UnresolvedInferenceVariable(_) => None,
@@ -351,8 +353,9 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> {
                         Component::Alias(alias_ty) => {
                             // We might end up here if we have `Foo<<Bar as Baz>::Assoc>: 'a`.
                             // With this, we can deduce that `<Bar as Baz>::Assoc: 'a`.
-                            Some(ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(
-                                ty::OutlivesPredicate(alias_ty.to_ty(tcx), r_min),
+                            Some(ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate(
+                                alias_ty.to_ty(tcx),
+                                r_min,
                             )))
                         }
 
@@ -362,10 +365,9 @@ impl<'tcx, O: Elaboratable<'tcx>> Elaborator<'tcx, O> {
                             None
                         }
                     })
-                    .map(|predicate_kind| {
-                        bound_predicate.rebind(predicate_kind).to_predicate(tcx)
-                    })
-                    .map(|predicate| elaboratable.child(predicate)),
+                    .map(|clause| {
+                        elaboratable.child(bound_predicate.rebind(clause).to_predicate(tcx))
+                    }),
                 );
             }
             ty::PredicateKind::Clause(ty::ClauseKind::TypeWellFormedFromEnv(..)) => {
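The change is mechanical but the direction matters: elaboration only ever produces clauses, so `child`/`child_with_derived_cause` now receive a clause and each implementer decides whether to widen it back into a predicate (`as_predicate`) or keep it as a clause. A minimal self-contained mock of that shape, with toy `Clause`/`Predicate` types standing in for the rustc ones (not the real API):

```rust
// Toy stand-ins for ty::Clause / ty::Predicate; the real types carry far
// more structure, this only models the conversion direction.
#[derive(Clone, Debug, PartialEq)]
struct Clause(String);

#[derive(Clone, Debug, PartialEq)]
struct Predicate(String);

impl Clause {
    // Every clause is a predicate; the reverse direction needs a fallible
    // `expect_clause`-style conversion, which this trait no longer requires.
    fn as_predicate(&self) -> Predicate {
        Predicate(self.0.clone())
    }
}

// Mock of the reworked trait: the elaborator hands implementers a Clause.
trait Elaboratable {
    fn child(&self, clause: Clause) -> Self;
}

// A predicate-shaped elaboratable widens the clause back out.
impl Elaboratable for Predicate {
    fn child(&self, clause: Clause) -> Self {
        clause.as_predicate()
    }
}

// A clause-with-span elaboratable keeps the clause as-is and reuses its span.
impl Elaboratable for (Clause, usize) {
    fn child(&self, clause: Clause) -> Self {
        (clause, self.1)
    }
}

fn main() {
    let pred = Predicate("T: Ord".into());
    assert_eq!(pred.child(Clause("T: PartialOrd".into())), Predicate("T: PartialOrd".into()));

    let spanned = (Clause("T: Ord".into()), 7);
    assert_eq!(spanned.child(Clause("T: Eq".into())), (Clause("T: Eq".into()), 7));
}
```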
@@ -18,7 +18,7 @@ privacy_private_in_public_lint =
     })
 
 privacy_private_interface_or_bounds_lint = {$ty_kind} `{$ty_descr}` is more private than the item `{$item_descr}`
-    .item_note = {$item_kind} `{$item_descr}` is reachable at visibility `{$item_vis_descr}`
+    .item_label = {$item_kind} `{$item_descr}` is reachable at visibility `{$item_vis_descr}`
     .ty_note = but {$ty_kind} `{$ty_descr}` is only usable at visibility `{$ty_vis_descr}`
 
 privacy_report_effective_visibility = {$descr}
@@ -116,7 +116,7 @@ pub struct UnnameableTypesLint<'a> {
 #[derive(LintDiagnostic)]
 #[diag(privacy_private_interface_or_bounds_lint)]
 pub struct PrivateInterfacesOrBoundsLint<'a> {
-    #[note(privacy_item_note)]
+    #[label(privacy_item_label)]
     pub item_span: Span,
     pub item_kind: &'a str,
     pub item_descr: DiagnosticArgFromDisplay<'a>,
@@ -1865,9 +1865,10 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
         } else {
             lint::builtin::PRIVATE_BOUNDS
         };
-        self.tcx.emit_lint(
+        self.tcx.emit_spanned_lint(
             lint,
             hir_id,
+            span,
             PrivateInterfacesOrBoundsLint {
                 item_span: span,
                 item_kind: self.tcx.def_descr(self.item_def_id.to_def_id()),
@@ -1403,7 +1403,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         for ns in [Namespace::MacroNS, Namespace::TypeNS, Namespace::ValueNS] {
             if let Ok(binding) = self.early_resolve_ident_in_lexical_scope(
                 ident,
-                ScopeSet::All(ns, false),
+                ScopeSet::All(ns),
                 &parent_scope,
                 None,
                 false,
@@ -1841,10 +1841,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                 _ => None,
             }
         } else {
-            let scopes = ScopeSet::All(ns_to_try, opt_ns.is_none());
             self.early_resolve_ident_in_lexical_scope(
                 ident,
-                scopes,
+                ScopeSet::All(ns_to_try),
                 parent_scope,
                 None,
                 false,
@@ -88,7 +88,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
         let rust_2015 = ctxt.edition().is_rust_2015();
         let (ns, macro_kind, is_absolute_path) = match scope_set {
-            ScopeSet::All(ns, _) => (ns, None, false),
+            ScopeSet::All(ns) => (ns, None, false),
             ScopeSet::AbsolutePath(ns) => (ns, None, true),
             ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind), false),
             ScopeSet::Late(ns, ..) => (ns, None, false),
@@ -397,11 +397,11 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             return Err(Determinacy::Determined);
         }
 
-        let (ns, macro_kind, is_import) = match scope_set {
-            ScopeSet::All(ns, is_import) => (ns, None, is_import),
-            ScopeSet::AbsolutePath(ns) => (ns, None, false),
-            ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind), false),
-            ScopeSet::Late(ns, ..) => (ns, None, false),
+        let (ns, macro_kind) = match scope_set {
+            ScopeSet::All(ns) => (ns, None),
+            ScopeSet::AbsolutePath(ns) => (ns, None),
+            ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind)),
+            ScopeSet::Late(ns, ..) => (ns, None),
         };
 
         // This is *the* result, resolution from the scope closest to the resolved identifier.
@@ -631,9 +631,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                         let derive_helper_compat =
                             Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat);
 
-                        let ambiguity_error_kind = if is_import {
-                            Some(AmbiguityKind::Import)
-                        } else if is_builtin(innermost_res) || is_builtin(res) {
+                        let ambiguity_error_kind = if is_builtin(innermost_res)
+                            || is_builtin(res)
+                        {
                             Some(AmbiguityKind::BuiltinAttr)
                         } else if innermost_res == derive_helper_compat
                             || res == derive_helper_compat && innermost_res != derive_helper
@@ -853,10 +853,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             }
         }
 
-        let scopes = ScopeSet::All(ns, true);
         let binding = self.early_resolve_ident_in_lexical_scope(
             ident,
-            scopes,
+            ScopeSet::All(ns),
             parent_scope,
             finalize,
             finalize.is_some(),
@@ -1497,7 +1496,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
         } else {
             self.early_resolve_ident_in_lexical_scope(
                 ident,
-                ScopeSet::All(ns, opt_ns.is_none()),
+                ScopeSet::All(ns),
                 parent_scope,
                 finalize,
                 finalize.is_some(),
@@ -10,10 +10,7 @@ use crate::errors::{
 use crate::Determinacy::{self, *};
 use crate::{fluent_generated as fluent, Namespace::*};
 use crate::{module_to_string, names_to_string, ImportSuggestion};
-use crate::{
-    AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BindingKey, ModuleKind, ResolutionError,
-    Resolver, Segment,
-};
+use crate::{AmbiguityKind, BindingKey, ModuleKind, ResolutionError, Resolver, Segment};
 use crate::{Finalize, Module, ModuleOrUniformRoot, ParentScope, PerNS, ScopeSet};
 use crate::{NameBinding, NameBindingKind, PathResult};
 
@@ -984,7 +981,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             match binding {
                 Ok(binding) => {
                     // Consistency checks, analogous to `finalize_macro_resolutions`.
-                    let initial_binding = source_bindings[ns].get().map(|initial_binding| {
+                    let initial_res = source_bindings[ns].get().map(|initial_binding| {
                         all_ns_err = false;
                         if let Some(target_binding) = target_bindings[ns].get() {
                             if target.name == kw::Underscore
@@ -998,20 +995,12 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
                                 );
                             }
                         }
-                        initial_binding
+                        initial_binding.res()
                     });
                     let res = binding.res();
-                    if let Ok(initial_binding) = initial_binding {
-                        let initial_res = initial_binding.res();
+                    if let Ok(initial_res) = initial_res {
                         if res != initial_res && this.ambiguity_errors.is_empty() {
-                            this.ambiguity_errors.push(AmbiguityError {
-                                kind: AmbiguityKind::Import,
-                                ident,
-                                b1: initial_binding,
-                                b2: binding,
-                                misc1: AmbiguityErrorMisc::None,
-                                misc2: AmbiguityErrorMisc::None,
-                            });
+                            span_bug!(import.span, "inconsistent resolution for an import");
                         }
                     } else if res != Res::Err
                         && this.ambiguity_errors.is_empty()
@@ -1283,7 +1272,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
 
                 match this.early_resolve_ident_in_lexical_scope(
                     target,
-                    ScopeSet::All(ns, false),
+                    ScopeSet::All(ns),
                     &import.parent_scope,
                     None,
                     false,
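With artificial import ambiguities gone, a mismatch between the resolution recorded at import time and the one found at finalization is no longer reported to the user as an "ambiguity"; it is treated as a resolver bug via `span_bug!`. A tiny self-contained sketch of that shift — the `Resolution` enum is invented for illustration, and `panic!` stands in for `span_bug!`:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Resolution {
    Struct(u32),
    Fn(u32),
}

/// Sketch of the new consistency check: the initial and the finalized
/// resolution of an import must agree. A mismatch is only tolerated if real
/// ambiguity errors were already reported; otherwise it is an internal bug.
fn check_import_consistency(
    initial_res: Resolution,
    finalized_res: Resolution,
    has_ambiguity_errors: bool,
) {
    if finalized_res != initial_res && !has_ambiguity_errors {
        panic!("inconsistent resolution for an import");
    }
}

fn main() {
    // Consistent resolutions pass silently.
    check_import_consistency(Resolution::Struct(1), Resolution::Struct(1), false);
    // A mismatch is tolerated only when ambiguity errors were already emitted.
    check_import_consistency(Resolution::Struct(1), Resolution::Fn(2), true);
    println!("consistency checks passed");
}
```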
@@ -131,7 +131,7 @@ enum Scope<'a> {
 #[derive(Clone, Copy)]
 enum ScopeSet<'a> {
     /// All scopes with the given namespace.
-    All(Namespace, /*is_import*/ bool),
+    All(Namespace),
     /// Crate root, then extern prelude (used for mixed 2015-2018 mode in macros).
     AbsolutePath(Namespace),
     /// All scopes with macro namespace and the given macro kind restriction.
@@ -718,7 +718,6 @@ struct UseError<'a> {
 
 #[derive(Clone, Copy, PartialEq, Debug)]
 enum AmbiguityKind {
-    Import,
     BuiltinAttr,
     DeriveHelper,
     MacroRulesVsModularized,
@@ -731,7 +730,6 @@ enum AmbiguityKind {
 impl AmbiguityKind {
     fn descr(self) -> &'static str {
         match self {
-            AmbiguityKind::Import => "multiple potential import sources",
             AmbiguityKind::BuiltinAttr => "a name conflict with a builtin attribute",
             AmbiguityKind::DeriveHelper => "a name conflict with a derive helper attribute",
             AmbiguityKind::MacroRulesVsModularized => {
@@ -1557,7 +1555,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             }
         }
 
-        self.visit_scopes(ScopeSet::All(TypeNS, false), parent_scope, ctxt, |this, scope, _, _| {
+        self.visit_scopes(ScopeSet::All(TypeNS), parent_scope, ctxt, |this, scope, _, _| {
             match scope {
                 Scope::Module(module, _) => {
                     this.traits_in_module(module, assoc_item, &mut found_traits);
@@ -645,7 +645,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
             self.prohibit_imported_non_macro_attrs(None, res.ok(), path_span);
             res
         } else {
-            let scope_set = kind.map_or(ScopeSet::All(MacroNS, false), ScopeSet::Macro);
+            let scope_set = kind.map_or(ScopeSet::All(MacroNS), ScopeSet::Macro);
             let binding = self.early_resolve_ident_in_lexical_scope(
                 path[0].ident,
                 scope_set,
@ -686,6 +686,7 @@ impl<'a> Builder<'a> {
|
||||||
test::Tidy,
|
test::Tidy,
|
||||||
test::Ui,
|
test::Ui,
|
||||||
test::RunPassValgrind,
|
test::RunPassValgrind,
|
||||||
|
test::RunCoverage,
|
||||||
test::MirOpt,
|
test::MirOpt,
|
||||||
test::Codegen,
|
test::Codegen,
|
||||||
test::CodegenUnits,
|
test::CodegenUnits,
|
||||||
|
@ -694,6 +695,7 @@ impl<'a> Builder<'a> {
|
||||||
test::Debuginfo,
|
test::Debuginfo,
|
||||||
test::UiFullDeps,
|
test::UiFullDeps,
|
||||||
test::Rustdoc,
|
test::Rustdoc,
|
||||||
|
test::RunCoverageRustdoc,
|
||||||
test::Pretty,
|
test::Pretty,
|
||||||
test::Crate,
|
test::Crate,
|
||||||
test::CrateLibrustc,
|
test::CrateLibrustc,
|
||||||
|
|
|
@ -751,7 +751,15 @@ impl Step for Rustc {
|
||||||
}
|
}
|
||||||
|
|
||||||
macro_rules! tool_doc {
|
macro_rules! tool_doc {
|
||||||
($tool: ident, $should_run: literal, $path: literal, $(rustc_tool = $rustc_tool:literal, )? $(in_tree = $in_tree:literal, )? [$($krate: literal),+ $(,)?] $(,)?) => {
|
(
|
||||||
|
$tool: ident,
|
||||||
|
$should_run: literal,
|
||||||
|
$path: literal,
|
||||||
|
$(rustc_tool = $rustc_tool:literal, )?
|
||||||
|
$(in_tree = $in_tree:literal, )?
|
||||||
|
[$($extra_arg: literal),+ $(,)?]
|
||||||
|
$(,)?
|
||||||
|
) => {
|
||||||
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
||||||
pub struct $tool {
|
pub struct $tool {
|
||||||
target: TargetSelection,
|
target: TargetSelection,
|
||||||
|
@ -832,9 +840,9 @@ macro_rules! tool_doc {
|
||||||
cargo.arg("-Zskip-rustdoc-fingerprint");
|
cargo.arg("-Zskip-rustdoc-fingerprint");
|
||||||
// Only include compiler crates, no dependencies of those, such as `libc`.
|
// Only include compiler crates, no dependencies of those, such as `libc`.
|
||||||
cargo.arg("--no-deps");
|
cargo.arg("--no-deps");
|
||||||
cargo.arg("--lib");
|
|
||||||
$(
|
$(
|
||||||
cargo.arg("-p").arg($krate);
|
cargo.arg($extra_arg);
|
||||||
)+
|
)+
|
||||||
|
|
||||||
cargo.rustdocflag("--document-private-items");
|
cargo.rustdocflag("--document-private-items");
|
||||||
|
@ -850,15 +858,20 @@ macro_rules! tool_doc {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tool_doc!(Rustdoc, "rustdoc-tool", "src/tools/rustdoc", ["rustdoc", "rustdoc-json-types"],);
|
tool_doc!(
|
||||||
|
Rustdoc,
|
||||||
|
"rustdoc-tool",
|
||||||
|
"src/tools/rustdoc",
|
||||||
|
["-p", "rustdoc", "-p", "rustdoc-json-types"]
|
||||||
|
);
|
||||||
tool_doc!(
|
tool_doc!(
|
||||||
Rustfmt,
|
Rustfmt,
|
||||||
"rustfmt-nightly",
|
"rustfmt-nightly",
|
||||||
"src/tools/rustfmt",
|
"src/tools/rustfmt",
|
||||||
["rustfmt-nightly", "rustfmt-config_proc_macro"],
|
["-p", "rustfmt-nightly", "-p", "rustfmt-config_proc_macro"],
|
||||||
);
|
);
|
||||||
tool_doc!(Clippy, "clippy", "src/tools/clippy", ["clippy_utils"]);
|
tool_doc!(Clippy, "clippy", "src/tools/clippy", ["-p", "clippy_utils"]);
|
||||||
tool_doc!(Miri, "miri", "src/tools/miri", ["miri"]);
|
tool_doc!(Miri, "miri", "src/tools/miri", ["-p", "miri"]);
|
||||||
tool_doc!(
|
tool_doc!(
|
||||||
Cargo,
|
Cargo,
|
||||||
"cargo",
|
"cargo",
|
||||||
|
@ -866,25 +879,44 @@ tool_doc!(
|
||||||
rustc_tool = false,
|
rustc_tool = false,
|
||||||
in_tree = false,
|
in_tree = false,
|
||||||
[
|
[
|
||||||
|
"-p",
|
||||||
"cargo",
|
"cargo",
|
||||||
|
"-p",
|
||||||
"cargo-platform",
|
"cargo-platform",
|
||||||
|
"-p",
|
||||||
"cargo-util",
|
"cargo-util",
|
||||||
|
"-p",
|
||||||
"crates-io",
|
"crates-io",
|
||||||
|
"-p",
|
||||||
"cargo-test-macro",
|
"cargo-test-macro",
|
||||||
|
"-p",
|
||||||
"cargo-test-support",
|
"cargo-test-support",
|
||||||
|
"-p",
|
||||||
"cargo-credential",
|
"cargo-credential",
|
||||||
|
"-p",
|
||||||
"cargo-credential-1password",
|
"cargo-credential-1password",
|
||||||
|
"-p",
|
||||||
"mdman",
|
"mdman",
|
||||||
// FIXME: this trips a license check in tidy.
|
// FIXME: this trips a license check in tidy.
|
||||||
|
// "-p",
|
||||||
// "resolver-tests",
|
// "resolver-tests",
|
||||||
// FIXME: we should probably document these, but they're different per-platform so we can't use `tool_doc`.
|
// FIXME: we should probably document these, but they're different per-platform so we can't use `tool_doc`.
|
||||||
|
// "-p",
|
||||||
// "cargo-credential-gnome-secret",
|
// "cargo-credential-gnome-secret",
|
||||||
|
// "-p",
|
||||||
// "cargo-credential-macos-keychain",
|
// "cargo-credential-macos-keychain",
|
||||||
|
// "-p",
|
||||||
// "cargo-credential-wincred",
|
// "cargo-credential-wincred",
|
||||||
]
|
]
|
||||||
);
|
);
|
||||||
tool_doc!(Tidy, "tidy", "src/tools/tidy", rustc_tool = false, ["tidy"]);
|
tool_doc!(Tidy, "tidy", "src/tools/tidy", rustc_tool = false, ["-p", "tidy"]);
|
||||||
tool_doc!(Bootstrap, "bootstrap", "src/bootstrap", rustc_tool = false, ["bootstrap"]);
|
tool_doc!(
|
||||||
|
Bootstrap,
|
||||||
|
"bootstrap",
|
||||||
|
"src/bootstrap",
|
||||||
|
rustc_tool = false,
|
||||||
|
["--lib", "-p", "bootstrap"]
|
||||||
|
);
|
||||||
|
|
||||||
#[derive(Ord, PartialOrd, Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
#[derive(Ord, PartialOrd, Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
||||||
pub struct ErrorIndex {
|
pub struct ErrorIndex {
|
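The net effect of the `tool_doc!` refactor above is that invocations now spell out raw cargo arguments (`-p`, `--lib`, …) instead of bare crate names. A minimal, hypothetical macro (not the real bootstrap one) that forwards literal arguments the same way:

```rust
use std::process::Command;

// Hypothetical stand-in for the refactored argument forwarding: each literal
// is passed straight through to cargo, so callers control `-p`/`--lib` themselves.
macro_rules! forward_cargo_args {
    ($cargo:expr, [$($extra_arg:literal),+ $(,)?]) => {
        $( $cargo.arg($extra_arg); )+
    };
}

fn main() {
    let mut cargo = Command::new("cargo");
    cargo.args(["doc", "--no-deps"]);
    // Mirrors the updated `tool_doc!(Rustdoc, ...)` invocation above.
    forward_cargo_args!(cargo, ["-p", "rustdoc", "-p", "rustdoc-json-types"]);
    println!("{cargo:?}");
}
```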
||||||
|
|
|
@ -1319,6 +1319,13 @@ host_test!(RunMakeFullDeps {
|
||||||
|
|
||||||
default_test!(Assembly { path: "tests/assembly", mode: "assembly", suite: "assembly" });
|
default_test!(Assembly { path: "tests/assembly", mode: "assembly", suite: "assembly" });
|
||||||
|
|
||||||
|
host_test!(RunCoverage { path: "tests/run-coverage", mode: "run-coverage", suite: "run-coverage" });
|
||||||
|
host_test!(RunCoverageRustdoc {
|
||||||
|
path: "tests/run-coverage-rustdoc",
|
||||||
|
mode: "run-coverage",
|
||||||
|
suite: "run-coverage-rustdoc"
|
||||||
|
});
|
||||||
|
|
||||||
// For the mir-opt suite we do not use macros, as we need custom behavior when blessing.
|
// For the mir-opt suite we do not use macros, as we need custom behavior when blessing.
|
||||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||||
pub struct MirOpt {
|
pub struct MirOpt {
|
||||||
|
@ -1503,6 +1510,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
|
||||||
|| (mode == "ui" && is_rustdoc)
|
|| (mode == "ui" && is_rustdoc)
|
||||||
|| mode == "js-doc-test"
|
|| mode == "js-doc-test"
|
||||||
|| mode == "rustdoc-json"
|
|| mode == "rustdoc-json"
|
||||||
|
|| suite == "run-coverage-rustdoc"
|
||||||
{
|
{
|
||||||
cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler));
|
cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler));
|
||||||
}
|
}
|
||||||
|
@ -1516,7 +1524,7 @@ note: if you're sure you want to do this, please open an issue as to why. In the
|
||||||
.arg(builder.ensure(tool::JsonDocLint { compiler: json_compiler, target }));
|
.arg(builder.ensure(tool::JsonDocLint { compiler: json_compiler, target }));
|
||||||
}
|
}
|
||||||
|
|
||||||
if mode == "run-make" {
|
if mode == "run-make" || mode == "run-coverage" {
|
||||||
let rust_demangler = builder
|
let rust_demangler = builder
|
||||||
.ensure(tool::RustDemangler {
|
.ensure(tool::RustDemangler {
|
||||||
compiler,
|
compiler,
|
||||||
|
@ -1703,17 +1711,21 @@ note: if you're sure you want to do this, please open an issue as to why. In the
|
||||||
add_link_lib_path(vec![llvm_libdir.trim().into()], &mut cmd);
|
add_link_lib_path(vec![llvm_libdir.trim().into()], &mut cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Only pass correct values for these flags for the `run-make` suite as it
|
if !builder.config.dry_run()
|
||||||
// requires that a C++ compiler was configured which isn't always the case.
|
&& (matches!(suite, "run-make" | "run-make-fulldeps") || mode == "run-coverage")
|
||||||
if !builder.config.dry_run() && matches!(suite, "run-make" | "run-make-fulldeps") {
|
{
|
||||||
// The llvm/bin directory contains many useful cross-platform
|
// The llvm/bin directory contains many useful cross-platform
|
||||||
// tools. Pass the path to run-make tests so they can use them.
|
// tools. Pass the path to run-make tests so they can use them.
|
||||||
|
// (The run-coverage tests also need these tools to process
|
||||||
|
// coverage reports.)
|
||||||
let llvm_bin_path = llvm_config
|
let llvm_bin_path = llvm_config
|
||||||
.parent()
|
.parent()
|
||||||
.expect("Expected llvm-config to be contained in directory");
|
.expect("Expected llvm-config to be contained in directory");
|
||||||
assert!(llvm_bin_path.is_dir());
|
assert!(llvm_bin_path.is_dir());
|
||||||
cmd.arg("--llvm-bin-dir").arg(llvm_bin_path);
|
cmd.arg("--llvm-bin-dir").arg(llvm_bin_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !builder.config.dry_run() && matches!(suite, "run-make" | "run-make-fulldeps") {
|
||||||
// If LLD is available, add it to the PATH
|
// If LLD is available, add it to the PATH
|
||||||
if builder.config.lld_enabled {
|
if builder.config.lld_enabled {
|
||||||
let lld_install_root =
|
let lld_install_root =
|
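A small sketch of the widened condition in the hunk above (written as a free function with plain parameters rather than the real `Builder` state), showing when the LLVM bin directory is now forwarded to compiletest:

```rust
// Sketch only: in bootstrap this is an inline condition, not a helper function.
fn needs_llvm_bin_dir(dry_run: bool, suite: &str, mode: &str) -> bool {
    // run-coverage needs llvm-profdata/llvm-cov to post-process reports,
    // so it joins the run-make suites here.
    !dry_run && (matches!(suite, "run-make" | "run-make-fulldeps") || mode == "run-coverage")
}

fn main() {
    assert!(needs_llvm_bin_dir(false, "run-coverage", "run-coverage"));
    assert!(needs_llvm_bin_dir(false, "run-make", "run-make"));
    assert!(!needs_llvm_bin_dir(true, "run-make", "run-make"));
    assert!(!needs_llvm_bin_dir(false, "ui", "ui"));
}
```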
||||||
|
|
|
@ -66,6 +66,7 @@ string_enum! {
|
||||||
JsDocTest => "js-doc-test",
|
JsDocTest => "js-doc-test",
|
||||||
MirOpt => "mir-opt",
|
MirOpt => "mir-opt",
|
||||||
Assembly => "assembly",
|
Assembly => "assembly",
|
||||||
|
RunCoverage => "run-coverage",
|
||||||
}
|
}
|
||||||
}
|
}
|
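The `string_enum!` addition above registers the new mode under the name `run-coverage`. A toy version of that string mapping, hand-written here instead of generated by the macro, purely for illustration:

```rust
#[derive(Debug, PartialEq, Clone, Copy)]
enum Mode {
    MirOpt,
    Assembly,
    RunCoverage,
}

fn mode_from_str(s: &str) -> Option<Mode> {
    match s {
        "mir-opt" => Some(Mode::MirOpt),
        "assembly" => Some(Mode::Assembly),
        "run-coverage" => Some(Mode::RunCoverage),
        _ => None,
    }
}

fn main() {
    assert_eq!(mode_from_str("run-coverage"), Some(Mode::RunCoverage));
    assert_eq!(mode_from_str("unknown"), None);
}
```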
||||||
|
|
||||||
|
@ -626,6 +627,7 @@ pub const UI_EXTENSIONS: &[&str] = &[
|
||||||
UI_STDERR_64,
|
UI_STDERR_64,
|
||||||
UI_STDERR_32,
|
UI_STDERR_32,
|
||||||
UI_STDERR_16,
|
UI_STDERR_16,
|
||||||
|
UI_COVERAGE,
|
||||||
];
|
];
|
||||||
pub const UI_STDERR: &str = "stderr";
|
pub const UI_STDERR: &str = "stderr";
|
||||||
pub const UI_STDOUT: &str = "stdout";
|
pub const UI_STDOUT: &str = "stdout";
|
||||||
|
@ -635,6 +637,7 @@ pub const UI_RUN_STDOUT: &str = "run.stdout";
|
||||||
pub const UI_STDERR_64: &str = "64bit.stderr";
|
pub const UI_STDERR_64: &str = "64bit.stderr";
|
||||||
pub const UI_STDERR_32: &str = "32bit.stderr";
|
pub const UI_STDERR_32: &str = "32bit.stderr";
|
||||||
pub const UI_STDERR_16: &str = "16bit.stderr";
|
pub const UI_STDERR_16: &str = "16bit.stderr";
|
||||||
|
pub const UI_COVERAGE: &str = "coverage";
|
||||||
|
|
||||||
/// Absolute path to the directory where all output for all tests in the given
|
/// Absolute path to the directory where all output for all tests in the given
|
||||||
/// `relative_dir` group should reside. Example:
|
/// `relative_dir` group should reside. Example:
|
||||||
|
|
|
@ -161,7 +161,7 @@ pub struct TestProps {
|
||||||
// customized normalization rules
|
// customized normalization rules
|
||||||
pub normalize_stdout: Vec<(String, String)>,
|
pub normalize_stdout: Vec<(String, String)>,
|
||||||
pub normalize_stderr: Vec<(String, String)>,
|
pub normalize_stderr: Vec<(String, String)>,
|
||||||
pub failure_status: i32,
|
pub failure_status: Option<i32>,
|
||||||
// For UI tests, allows compiler to exit with arbitrary failure status
|
// For UI tests, allows compiler to exit with arbitrary failure status
|
||||||
pub dont_check_failure_status: bool,
|
pub dont_check_failure_status: bool,
|
||||||
// Whether or not `rustfix` should apply the `CodeSuggestion`s of this test and compile the
|
// Whether or not `rustfix` should apply the `CodeSuggestion`s of this test and compile the
|
||||||
|
@ -257,7 +257,7 @@ impl TestProps {
|
||||||
check_test_line_numbers_match: false,
|
check_test_line_numbers_match: false,
|
||||||
normalize_stdout: vec![],
|
normalize_stdout: vec![],
|
||||||
normalize_stderr: vec![],
|
normalize_stderr: vec![],
|
||||||
failure_status: -1,
|
failure_status: None,
|
||||||
dont_check_failure_status: false,
|
dont_check_failure_status: false,
|
||||||
run_rustfix: false,
|
run_rustfix: false,
|
||||||
rustfix_only_machine_applicable: false,
|
rustfix_only_machine_applicable: false,
|
||||||
|
@ -428,7 +428,7 @@ impl TestProps {
|
||||||
.parse_name_value_directive(ln, FAILURE_STATUS)
|
.parse_name_value_directive(ln, FAILURE_STATUS)
|
||||||
.and_then(|code| code.trim().parse::<i32>().ok())
|
.and_then(|code| code.trim().parse::<i32>().ok())
|
||||||
{
|
{
|
||||||
self.failure_status = code;
|
self.failure_status = Some(code);
|
||||||
}
|
}
|
||||||
|
|
||||||
config.set_name_directive(
|
config.set_name_directive(
|
||||||
|
@ -491,11 +491,8 @@ impl TestProps {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
if self.failure_status == -1 {
|
|
||||||
self.failure_status = 1;
|
|
||||||
}
|
|
||||||
if self.should_ice {
|
if self.should_ice {
|
||||||
self.failure_status = 101;
|
self.failure_status = Some(101);
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.mode == Mode::Incremental {
|
if config.mode == Mode::Incremental {
|
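To summarize the `failure_status` change running through the hunks above: the field is now `Option<i32>`, the hard-coded `-1`/`1` defaulting is gone from parsing, and (as shown later in `check_correct_failure_status`) the fallback to `1` happens only at comparison time. A small self-contained sketch, with a hypothetical parser standing in for `parse_name_value_directive`:

```rust
// Hypothetical helper; compiletest parses this via `parse_name_value_directive`.
fn parse_failure_status(line: &str) -> Option<i32> {
    line.strip_prefix("// failure-status:")
        .and_then(|code| code.trim().parse::<i32>().ok())
}

fn main() {
    let explicit = parse_failure_status("// failure-status: 101");
    let missing = parse_failure_status("// compile-flags: -O");
    assert_eq!(explicit, Some(101));
    assert_eq!(missing, None);

    // Mirrors the later `check_correct_failure_status`: default only when checking.
    assert_eq!(explicit.unwrap_or(1), 101);
    assert_eq!(missing.unwrap_or(1), 1);
}
```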
||||||
|
@ -615,10 +612,25 @@ pub fn line_directive<'line>(
|
||||||
}
|
}
|
||||||
|
|
||||||
fn iter_header<R: Read>(testfile: &Path, rdr: R, it: &mut dyn FnMut(Option<&str>, &str, usize)) {
|
fn iter_header<R: Read>(testfile: &Path, rdr: R, it: &mut dyn FnMut(Option<&str>, &str, usize)) {
|
||||||
|
iter_header_extra(testfile, rdr, &[], it)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn iter_header_extra(
|
||||||
|
testfile: &Path,
|
||||||
|
rdr: impl Read,
|
||||||
|
extra_directives: &[&str],
|
||||||
|
it: &mut dyn FnMut(Option<&str>, &str, usize),
|
||||||
|
) {
|
||||||
if testfile.is_dir() {
|
if testfile.is_dir() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Process any extra directives supplied by the caller (e.g. because they
|
||||||
|
// are implied by the test mode), with a dummy line number of 0.
|
||||||
|
for directive in extra_directives {
|
||||||
|
it(None, directive, 0);
|
||||||
|
}
|
||||||
|
|
||||||
let comment = if testfile.extension().map(|e| e == "rs") == Some(true) { "//" } else { "#" };
|
let comment = if testfile.extension().map(|e| e == "rs") == Some(true) { "//" } else { "#" };
|
||||||
|
|
||||||
let mut rdr = BufReader::new(rdr);
|
let mut rdr = BufReader::new(rdr);
|
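A simplified, runnable sketch of the `iter_header_extra` idea above (operating on an in-memory slice of lines instead of a `Read` plus comment detection): implied directives are fed to the same callback first, with a dummy line number of 0.

```rust
// Simplified stand-in for `iter_header_extra`; the real function reads from a
// `BufReader`, strips comment prefixes, and tracks revisions.
fn iter_header_extra(
    lines: &[&str],
    extra_directives: &[&str],
    it: &mut dyn FnMut(Option<&str>, &str, usize),
) {
    // Extra directives implied by the test mode come first, at "line 0".
    for directive in extra_directives {
        it(None, directive, 0);
    }
    for (i, line) in lines.iter().enumerate() {
        it(None, line, i + 1);
    }
}

fn main() {
    let mut seen = Vec::new();
    iter_header_extra(
        &["failure-status: 101"],
        &["needs-profiler-support", "ignore-cross-compile"],
        &mut |_revision, ln, line_number| seen.push((ln.to_string(), line_number)),
    );
    assert_eq!(seen[0], ("needs-profiler-support".to_string(), 0));
    assert_eq!(seen.last().unwrap(), &("failure-status: 101".to_string(), 1));
}
```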
||||||
|
@ -897,7 +909,27 @@ pub fn make_test_description<R: Read>(
|
||||||
let mut ignore_message = None;
|
let mut ignore_message = None;
|
||||||
let mut should_fail = false;
|
let mut should_fail = false;
|
||||||
|
|
||||||
iter_header(path, src, &mut |revision, ln, line_number| {
|
let extra_directives: &[&str] = match config.mode {
|
||||||
|
// The run-coverage tests are treated as having these extra directives,
|
||||||
|
// without needing to specify them manually in every test file.
|
||||||
|
// (Some of the comments below have been copied over from
|
||||||
|
// `tests/run-make/coverage-reports/Makefile`, which no longer exists.)
|
||||||
|
Mode::RunCoverage => {
|
||||||
|
&[
|
||||||
|
"needs-profiler-support",
|
||||||
|
// FIXME(mati865): MinGW GCC miscompiles compiler-rt profiling library but with Clang it works
|
||||||
|
// properly. Since we only have GCC on the CI ignore the test for now.
|
||||||
|
"ignore-windows-gnu",
|
||||||
|
// FIXME(pietroalbini): this test currently does not work on cross-compiled
|
||||||
|
// targets because remote-test is not capable of sending back the *.profraw
|
||||||
|
// files generated by the LLVM instrumentation.
|
||||||
|
"ignore-cross-compile",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
_ => &[],
|
||||||
|
};
|
||||||
|
|
||||||
|
iter_header_extra(path, src, extra_directives, &mut |revision, ln, line_number| {
|
||||||
if revision.is_some() && revision != cfg {
|
if revision.is_some() && revision != cfg {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
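Pulling the new mode-specific defaults out of the hunk above into a tiny sketch (with a stand-in `Mode` enum): run-coverage tests implicitly get the profiler, windows-gnu, and cross-compile restrictions without repeating them in every test file.

```rust
#[derive(Clone, Copy)]
enum Mode {
    Ui,
    RunCoverage,
}

// Stand-in for the `match config.mode` block above.
fn implied_directives(mode: Mode) -> &'static [&'static str] {
    match mode {
        Mode::RunCoverage => &[
            "needs-profiler-support",
            "ignore-windows-gnu",
            "ignore-cross-compile",
        ],
        _ => &[],
    }
}

fn main() {
    assert_eq!(implied_directives(Mode::RunCoverage).len(), 3);
    assert!(implied_directives(Mode::Ui).is_empty());
}
```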
||||||
|
|
|
@ -87,7 +87,7 @@ pub(super) fn handle_needs(
|
||||||
},
|
},
|
||||||
Need {
|
Need {
|
||||||
name: "needs-profiler-support",
|
name: "needs-profiler-support",
|
||||||
condition: std::env::var_os("RUSTC_PROFILER_SUPPORT").is_some(),
|
condition: cache.profiler_support,
|
||||||
ignore_reason: "ignored when profiler support is disabled",
|
ignore_reason: "ignored when profiler support is disabled",
|
||||||
},
|
},
|
||||||
Need {
|
Need {
|
||||||
|
@ -195,6 +195,7 @@ pub(super) struct CachedNeedsConditions {
|
||||||
sanitizer_memtag: bool,
|
sanitizer_memtag: bool,
|
||||||
sanitizer_shadow_call_stack: bool,
|
sanitizer_shadow_call_stack: bool,
|
||||||
sanitizer_safestack: bool,
|
sanitizer_safestack: bool,
|
||||||
|
profiler_support: bool,
|
||||||
xray: bool,
|
xray: bool,
|
||||||
rust_lld: bool,
|
rust_lld: bool,
|
||||||
i686_dlltool: bool,
|
i686_dlltool: bool,
|
||||||
|
@ -232,6 +233,7 @@ impl CachedNeedsConditions {
|
||||||
sanitizer_memtag: util::MEMTAG_SUPPORTED_TARGETS.contains(target),
|
sanitizer_memtag: util::MEMTAG_SUPPORTED_TARGETS.contains(target),
|
||||||
sanitizer_shadow_call_stack: util::SHADOWCALLSTACK_SUPPORTED_TARGETS.contains(target),
|
sanitizer_shadow_call_stack: util::SHADOWCALLSTACK_SUPPORTED_TARGETS.contains(target),
|
||||||
sanitizer_safestack: util::SAFESTACK_SUPPORTED_TARGETS.contains(target),
|
sanitizer_safestack: util::SAFESTACK_SUPPORTED_TARGETS.contains(target),
|
||||||
|
profiler_support: std::env::var_os("RUSTC_PROFILER_SUPPORT").is_some(),
|
||||||
xray: util::XRAY_SUPPORTED_TARGETS.contains(target),
|
xray: util::XRAY_SUPPORTED_TARGETS.contains(target),
|
||||||
|
|
||||||
// For tests using the `needs-rust-lld` directive (e.g. for `-Zgcc-ld=lld`), we need to find
|
// For tests using the `needs-rust-lld` directive (e.g. for `-Zgcc-ld=lld`), we need to find
|
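The `needs-profiler-support` change above simply moves the environment check into the cached conditions computed once per run. A bare-bones sketch of that caching:

```rust
// Minimal sketch: the real struct caches many more conditions.
struct CachedNeedsConditions {
    profiler_support: bool,
}

impl CachedNeedsConditions {
    fn new() -> Self {
        // Same check as before, but performed once instead of per directive.
        Self { profiler_support: std::env::var_os("RUSTC_PROFILER_SUPPORT").is_some() }
    }
}

fn main() {
    let cache = CachedNeedsConditions::new();
    println!("profiler support: {}", cache.profiler_support);
}
```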
||||||
|
|
|
@ -6,8 +6,8 @@ use crate::common::{Assembly, Incremental, JsDocTest, MirOpt, RunMake, RustdocJs
|
||||||
use crate::common::{Codegen, CodegenUnits, DebugInfo, Debugger, Rustdoc};
|
use crate::common::{Codegen, CodegenUnits, DebugInfo, Debugger, Rustdoc};
|
||||||
use crate::common::{CompareMode, FailMode, PassMode};
|
use crate::common::{CompareMode, FailMode, PassMode};
|
||||||
use crate::common::{Config, TestPaths};
|
use crate::common::{Config, TestPaths};
|
||||||
use crate::common::{Pretty, RunPassValgrind};
|
use crate::common::{Pretty, RunCoverage, RunPassValgrind};
|
||||||
use crate::common::{UI_RUN_STDERR, UI_RUN_STDOUT};
|
use crate::common::{UI_COVERAGE, UI_RUN_STDERR, UI_RUN_STDOUT};
|
||||||
use crate::compute_diff::{write_diff, write_filtered_diff};
|
use crate::compute_diff::{write_diff, write_filtered_diff};
|
||||||
use crate::errors::{self, Error, ErrorKind};
|
use crate::errors::{self, Error, ErrorKind};
|
||||||
use crate::header::TestProps;
|
use crate::header::TestProps;
|
||||||
|
@ -253,6 +253,7 @@ impl<'test> TestCx<'test> {
|
||||||
MirOpt => self.run_mir_opt_test(),
|
MirOpt => self.run_mir_opt_test(),
|
||||||
Assembly => self.run_assembly_test(),
|
Assembly => self.run_assembly_test(),
|
||||||
JsDocTest => self.run_js_doc_test(),
|
JsDocTest => self.run_js_doc_test(),
|
||||||
|
RunCoverage => self.run_coverage_test(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -384,7 +385,7 @@ impl<'test> TestCx<'test> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn check_correct_failure_status(&self, proc_res: &ProcRes) {
|
fn check_correct_failure_status(&self, proc_res: &ProcRes) {
|
||||||
let expected_status = Some(self.props.failure_status);
|
let expected_status = Some(self.props.failure_status.unwrap_or(1));
|
||||||
let received_status = proc_res.status.code();
|
let received_status = proc_res.status.code();
|
||||||
|
|
||||||
if expected_status != received_status {
|
if expected_status != received_status {
|
||||||
|
@ -465,6 +466,296 @@ impl<'test> TestCx<'test> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn run_coverage_test(&self) {
|
||||||
|
let should_run = self.run_if_enabled();
|
||||||
|
let proc_res = self.compile_test(should_run, Emit::None);
|
||||||
|
|
||||||
|
if !proc_res.status.success() {
|
||||||
|
self.fatal_proc_rec("compilation failed!", &proc_res);
|
||||||
|
}
|
||||||
|
drop(proc_res);
|
||||||
|
|
||||||
|
if let WillExecute::Disabled = should_run {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let profraw_path = self.output_base_dir().join("default.profraw");
|
||||||
|
let profdata_path = self.output_base_dir().join("default.profdata");
|
||||||
|
|
||||||
|
// Delete any existing profraw/profdata files to rule out unintended
|
||||||
|
// interference between repeated test runs.
|
||||||
|
if profraw_path.exists() {
|
||||||
|
std::fs::remove_file(&profraw_path).unwrap();
|
||||||
|
}
|
||||||
|
if profdata_path.exists() {
|
||||||
|
std::fs::remove_file(&profdata_path).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let proc_res = self.exec_compiled_test_general(
|
||||||
|
&[("LLVM_PROFILE_FILE", &profraw_path.to_str().unwrap())],
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
if self.props.failure_status.is_some() {
|
||||||
|
self.check_correct_failure_status(&proc_res);
|
||||||
|
} else if !proc_res.status.success() {
|
||||||
|
self.fatal_proc_rec("test run failed!", &proc_res);
|
||||||
|
}
|
||||||
|
drop(proc_res);
|
||||||
|
|
||||||
|
let mut profraw_paths = vec![profraw_path];
|
||||||
|
let mut bin_paths = vec![self.make_exe_name()];
|
||||||
|
|
||||||
|
if self.config.suite == "run-coverage-rustdoc" {
|
||||||
|
self.run_doctests_for_coverage(&mut profraw_paths, &mut bin_paths);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run `llvm-profdata merge` to index the raw coverage output.
|
||||||
|
let proc_res = self.run_llvm_tool("llvm-profdata", |cmd| {
|
||||||
|
cmd.args(["merge", "--sparse", "--output"]);
|
||||||
|
cmd.arg(&profdata_path);
|
||||||
|
cmd.args(&profraw_paths);
|
||||||
|
});
|
||||||
|
if !proc_res.status.success() {
|
||||||
|
self.fatal_proc_rec("llvm-profdata merge failed!", &proc_res);
|
||||||
|
}
|
||||||
|
drop(proc_res);
|
||||||
|
|
||||||
|
// Run `llvm-cov show` to produce a coverage report in text format.
|
||||||
|
let proc_res = self.run_llvm_tool("llvm-cov", |cmd| {
|
||||||
|
cmd.args(["show", "--format=text", "--show-line-counts-or-regions"]);
|
||||||
|
|
||||||
|
cmd.arg("--Xdemangler");
|
||||||
|
cmd.arg(self.config.rust_demangler_path.as_ref().unwrap());
|
||||||
|
|
||||||
|
cmd.arg("--instr-profile");
|
||||||
|
cmd.arg(&profdata_path);
|
||||||
|
|
||||||
|
for bin in &bin_paths {
|
||||||
|
cmd.arg("--object");
|
||||||
|
cmd.arg(bin);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if !proc_res.status.success() {
|
||||||
|
self.fatal_proc_rec("llvm-cov show failed!", &proc_res);
|
||||||
|
}
|
||||||
|
|
||||||
|
let kind = UI_COVERAGE;
|
||||||
|
|
||||||
|
let expected_coverage = self.load_expected_output(kind);
|
||||||
|
let normalized_actual_coverage =
|
||||||
|
self.normalize_coverage_output(&proc_res.stdout).unwrap_or_else(|err| {
|
||||||
|
self.fatal_proc_rec(&err, &proc_res);
|
||||||
|
});
|
||||||
|
|
||||||
|
let coverage_errors = self.compare_output(
|
||||||
|
kind,
|
||||||
|
&normalized_actual_coverage,
|
||||||
|
&expected_coverage,
|
||||||
|
self.props.compare_output_lines_by_subset,
|
||||||
|
);
|
||||||
|
|
||||||
|
if coverage_errors > 0 {
|
||||||
|
self.fatal_proc_rec(
|
||||||
|
&format!("{} errors occurred comparing coverage output.", coverage_errors),
|
||||||
|
&proc_res,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
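Between `run_coverage_test` above and the helpers below, the external tool pipeline boils down to two invocations. A standalone sketch of roughly what gets run (the `--Xdemangler` step is omitted, and the LLVM bin directory and all paths are placeholders):

```rust
use std::path::Path;
use std::process::Command;

// Rough sketch of the run-coverage post-processing: merge raw profiles, then
// render a text report. Flags mirror the diff; paths are illustrative.
fn merge_and_show(llvm_bin: &Path, profraws: &[&Path], profdata: &Path, bins: &[&Path]) -> String {
    let status = Command::new(llvm_bin.join("llvm-profdata"))
        .args(["merge", "--sparse", "--output"])
        .arg(profdata)
        .args(profraws)
        .status()
        .expect("failed to run llvm-profdata");
    assert!(status.success(), "llvm-profdata merge failed");

    let mut cmd = Command::new(llvm_bin.join("llvm-cov"));
    cmd.args(["show", "--format=text", "--show-line-counts-or-regions"]);
    cmd.arg("--instr-profile").arg(profdata);
    for bin in bins {
        cmd.arg("--object").arg(bin);
    }
    let output = cmd.output().expect("failed to run llvm-cov");
    assert!(output.status.success(), "llvm-cov show failed");
    String::from_utf8(output.stdout).expect("llvm-cov output was not UTF-8")
}

fn main() {
    // Example wiring only; adjust `llvm_bin` to a real LLVM installation to try it.
    let llvm_bin = Path::new("/usr/lib/llvm/bin");
    if !llvm_bin.join("llvm-profdata").exists() {
        eprintln!("llvm-profdata not found under {}; skipping", llvm_bin.display());
        return;
    }
    let report = merge_and_show(
        llvm_bin,
        &[Path::new("default.profraw")],
        Path::new("default.profdata"),
        &[Path::new("my_test")],
    );
    println!("{report}");
}
```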
||||||
|
/// Run any doctests embedded in this test file, and add any resulting
|
||||||
|
/// `.profraw` files and doctest executables to the given vectors.
|
||||||
|
fn run_doctests_for_coverage(
|
||||||
|
&self,
|
||||||
|
profraw_paths: &mut Vec<PathBuf>,
|
||||||
|
bin_paths: &mut Vec<PathBuf>,
|
||||||
|
) {
|
||||||
|
// Put .profraw files and doctest executables in dedicated directories,
|
||||||
|
// to make it easier to glob them all later.
|
||||||
|
let profraws_dir = self.output_base_dir().join("doc_profraws");
|
||||||
|
let bins_dir = self.output_base_dir().join("doc_bins");
|
||||||
|
|
||||||
|
// Remove existing directories to prevent cross-run interference.
|
||||||
|
if profraws_dir.try_exists().unwrap() {
|
||||||
|
std::fs::remove_dir_all(&profraws_dir).unwrap();
|
||||||
|
}
|
||||||
|
if bins_dir.try_exists().unwrap() {
|
||||||
|
std::fs::remove_dir_all(&bins_dir).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut rustdoc_cmd =
|
||||||
|
Command::new(self.config.rustdoc_path.as_ref().expect("--rustdoc-path not passed"));
|
||||||
|
|
||||||
|
// In general there will be multiple doctest binaries running, so we
|
||||||
|
// tell the profiler runtime to write their coverage data into separate
|
||||||
|
// profraw files.
|
||||||
|
rustdoc_cmd.env("LLVM_PROFILE_FILE", profraws_dir.join("%p-%m.profraw"));
|
||||||
|
|
||||||
|
rustdoc_cmd.args(["--test", "-Cinstrument-coverage"]);
|
||||||
|
|
||||||
|
// Without this, the doctests complain about not being able to find
|
||||||
|
// their enclosing file's crate for some reason.
|
||||||
|
rustdoc_cmd.args(["--crate-name", "workaround_for_79771"]);
|
||||||
|
|
||||||
|
// Persist the doctest binaries so that `llvm-cov show` can read their
|
||||||
|
// embedded coverage mappings later.
|
||||||
|
rustdoc_cmd.arg("-Zunstable-options");
|
||||||
|
rustdoc_cmd.arg("--persist-doctests");
|
||||||
|
rustdoc_cmd.arg(&bins_dir);
|
||||||
|
|
||||||
|
rustdoc_cmd.arg("-L");
|
||||||
|
rustdoc_cmd.arg(self.aux_output_dir_name());
|
||||||
|
|
||||||
|
rustdoc_cmd.arg(&self.testpaths.file);
|
||||||
|
|
||||||
|
let proc_res = self.compose_and_run_compiler(rustdoc_cmd, None);
|
||||||
|
if !proc_res.status.success() {
|
||||||
|
self.fatal_proc_rec("rustdoc --test failed!", &proc_res)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn glob_iter(path: impl AsRef<Path>) -> impl Iterator<Item = PathBuf> {
|
||||||
|
let path_str = path.as_ref().to_str().unwrap();
|
||||||
|
let iter = glob(path_str).unwrap();
|
||||||
|
iter.map(Result::unwrap)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find all profraw files in the profraw directory.
|
||||||
|
for p in glob_iter(profraws_dir.join("*.profraw")) {
|
||||||
|
profraw_paths.push(p);
|
||||||
|
}
|
||||||
|
// Find all executables in the `--persist-doctests` directory, while
|
||||||
|
// avoiding other file types (e.g. `.pdb` on Windows). This doesn't
|
||||||
|
// need to be perfect, as long as it can handle the files actually
|
||||||
|
// produced by `rustdoc --test`.
|
||||||
|
for p in glob_iter(bins_dir.join("**/*")) {
|
||||||
|
let is_bin = p.is_file()
|
||||||
|
&& match p.extension() {
|
||||||
|
None => true,
|
||||||
|
Some(ext) => ext == OsStr::new("exe"),
|
||||||
|
};
|
||||||
|
if is_bin {
|
||||||
|
bin_paths.push(p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
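For the doctest half of the pipeline, the rustdoc invocation assembled by `run_doctests_for_coverage` above can be summarized as the following standalone sketch (the rustdoc path, test file, and output directories are placeholders):

```rust
use std::path::Path;
use std::process::Command;

// Sketch of the doctest-coverage invocation; flags mirror the diff above.
fn doctest_coverage_command(
    rustdoc: &Path,
    test_file: &Path,
    profraws_dir: &Path,
    bins_dir: &Path,
) -> Command {
    let mut cmd = Command::new(rustdoc);
    // Each doctest process writes its own profile, hence the %p/%m placeholders.
    cmd.env("LLVM_PROFILE_FILE", profraws_dir.join("%p-%m.profraw"));
    cmd.args(["--test", "-Cinstrument-coverage"]);
    // Workaround from the diff: give the doctests a fixed crate name.
    cmd.args(["--crate-name", "workaround_for_79771"]);
    // Keep the doctest executables so llvm-cov can read their coverage mappings.
    cmd.arg("-Zunstable-options");
    cmd.arg("--persist-doctests");
    cmd.arg(bins_dir);
    cmd.arg(test_file);
    cmd
}

fn main() {
    let cmd = doctest_coverage_command(
        Path::new("rustdoc"),
        Path::new("tests/run-coverage-rustdoc/doctest.rs"),
        Path::new("doc_profraws"),
        Path::new("doc_bins"),
    );
    println!("{cmd:?}");
}
```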
||||||
|
fn run_llvm_tool(&self, name: &str, configure_cmd_fn: impl FnOnce(&mut Command)) -> ProcRes {
|
||||||
|
let tool_path = self
|
||||||
|
.config
|
||||||
|
.llvm_bin_dir
|
||||||
|
.as_ref()
|
||||||
|
.expect("this test expects the LLVM bin dir to be available")
|
||||||
|
.join(name);
|
||||||
|
|
||||||
|
let mut cmd = Command::new(tool_path);
|
||||||
|
configure_cmd_fn(&mut cmd);
|
||||||
|
|
||||||
|
let output = cmd.output().unwrap_or_else(|_| panic!("failed to exec `{cmd:?}`"));
|
||||||
|
|
||||||
|
let proc_res = ProcRes {
|
||||||
|
status: output.status,
|
||||||
|
stdout: String::from_utf8(output.stdout).unwrap(),
|
||||||
|
stderr: String::from_utf8(output.stderr).unwrap(),
|
||||||
|
cmdline: format!("{cmd:?}"),
|
||||||
|
};
|
||||||
|
self.dump_output(&proc_res.stdout, &proc_res.stderr);
|
||||||
|
|
||||||
|
proc_res
|
||||||
|
}
|
||||||
|
|
||||||
|
fn normalize_coverage_output(&self, coverage: &str) -> Result<String, String> {
|
||||||
|
let normalized = self.normalize_output(coverage, &[]);
|
||||||
|
|
||||||
|
let mut lines = normalized.lines().collect::<Vec<_>>();
|
||||||
|
|
||||||
|
Self::sort_coverage_file_sections(&mut lines)?;
|
||||||
|
Self::sort_coverage_subviews(&mut lines)?;
|
||||||
|
|
||||||
|
let joined_lines = lines.iter().flat_map(|line| [line, "\n"]).collect::<String>();
|
||||||
|
Ok(joined_lines)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Coverage reports can describe multiple source files, separated by
|
||||||
|
/// blank lines. The order of these files is unpredictable (since it
|
||||||
|
/// depends on implementation details), so we need to sort the file
|
||||||
|
/// sections into a consistent order before comparing against a snapshot.
|
||||||
|
fn sort_coverage_file_sections(coverage_lines: &mut Vec<&str>) -> Result<(), String> {
|
||||||
|
// Group the lines into file sections, separated by blank lines.
|
||||||
|
let mut sections = coverage_lines.split(|line| line.is_empty()).collect::<Vec<_>>();
|
||||||
|
|
||||||
|
// The last section should be empty, representing an extra trailing blank line.
|
||||||
|
if !sections.last().is_some_and(|last| last.is_empty()) {
|
||||||
|
return Err("coverage report should end with an extra blank line".to_owned());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort the file sections (not including the final empty "section").
|
||||||
|
let except_last = sections.len() - 1;
|
||||||
|
(&mut sections[..except_last]).sort();
|
||||||
|
|
||||||
|
// Join the file sections back into a flat list of lines, with
|
||||||
|
// sections separated by blank lines.
|
||||||
|
let joined = sections.join(&[""] as &[_]);
|
||||||
|
assert_eq!(joined.len(), coverage_lines.len());
|
||||||
|
*coverage_lines = joined;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
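The normalization helpers above and below exist because `llvm-cov show` emits per-file sections (and per-function subviews) in an unstable order. A toy illustration of the same idea on a plain string report, assuming sections are separated by blank lines:

```rust
// Toy version of the snapshot normalization: split a report into
// blank-line-separated sections, sort them, and join them back.
fn sort_report_sections(report: &str) -> String {
    let mut sections: Vec<&str> = report.trim_end().split("\n\n").collect();
    sections.sort();
    sections.join("\n\n") + "\n"
}

fn main() {
    let report = "\
b.rs:
    2|      1|fn b() {}

a.rs:
    1|      1|fn a() {}
";
    let sorted = sort_report_sections(report);
    assert!(sorted.starts_with("a.rs:"));
    assert!(sorted.contains("\n\nb.rs:"));
}
```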
||||||
|
fn sort_coverage_subviews(coverage_lines: &mut Vec<&str>) -> Result<(), String> {
|
||||||
|
let mut output_lines = Vec::new();
|
||||||
|
|
||||||
|
// We accumulate a list of zero or more "subviews", where each
|
||||||
|
// subview is a list of one or more lines.
|
||||||
|
let mut subviews: Vec<Vec<&str>> = Vec::new();
|
||||||
|
|
||||||
|
fn flush<'a>(subviews: &mut Vec<Vec<&'a str>>, output_lines: &mut Vec<&'a str>) {
|
||||||
|
if subviews.is_empty() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take and clear the list of accumulated subviews.
|
||||||
|
let mut subviews = std::mem::take(subviews);
|
||||||
|
|
||||||
|
// The last "subview" should be just a boundary line on its own,
|
||||||
|
// so exclude it when sorting the other subviews.
|
||||||
|
let except_last = subviews.len() - 1;
|
||||||
|
(&mut subviews[..except_last]).sort();
|
||||||
|
|
||||||
|
for view in subviews {
|
||||||
|
for line in view {
|
||||||
|
output_lines.push(line);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (line, line_num) in coverage_lines.iter().zip(1..) {
|
||||||
|
if line.starts_with(" ------------------") {
|
||||||
|
// This is a subview boundary line, so start a new subview.
|
||||||
|
subviews.push(vec![line]);
|
||||||
|
} else if line.starts_with(" |") {
|
||||||
|
// Add this line to the current subview.
|
||||||
|
subviews
|
||||||
|
.last_mut()
|
||||||
|
.ok_or(format!(
|
||||||
|
"unexpected subview line outside of a subview on line {line_num}"
|
||||||
|
))?
|
||||||
|
.push(line);
|
||||||
|
} else {
|
||||||
|
// This line is not part of a subview, so sort and print any
|
||||||
|
// accumulated subviews, and then print the line as-is.
|
||||||
|
flush(&mut subviews, &mut output_lines);
|
||||||
|
output_lines.push(line);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
flush(&mut subviews, &mut output_lines);
|
||||||
|
assert!(subviews.is_empty());
|
||||||
|
|
||||||
|
assert_eq!(output_lines.len(), coverage_lines.len());
|
||||||
|
*coverage_lines = output_lines;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
fn run_pretty_test(&self) {
|
fn run_pretty_test(&self) {
|
||||||
if self.props.pp_exact.is_some() {
|
if self.props.pp_exact.is_some() {
|
||||||
logv(self.config, "testing for exact pretty-printing".to_owned());
|
logv(self.config, "testing for exact pretty-printing".to_owned());
|
||||||
|
@ -1598,7 +1889,26 @@ impl<'test> TestCx<'test> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn exec_compiled_test(&self) -> ProcRes {
|
fn exec_compiled_test(&self) -> ProcRes {
|
||||||
let env = &self.props.exec_env;
|
self.exec_compiled_test_general(&[], true)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn exec_compiled_test_general(
|
||||||
|
&self,
|
||||||
|
env_extra: &[(&str, &str)],
|
||||||
|
delete_after_success: bool,
|
||||||
|
) -> ProcRes {
|
||||||
|
let prepare_env = |cmd: &mut Command| {
|
||||||
|
for key in &self.props.unset_exec_env {
|
||||||
|
cmd.env_remove(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (key, val) in &self.props.exec_env {
|
||||||
|
cmd.env(key, val);
|
||||||
|
}
|
||||||
|
for (key, val) in env_extra {
|
||||||
|
cmd.env(key, val);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let proc_res = match &*self.config.target {
|
let proc_res = match &*self.config.target {
|
||||||
// This is pretty similar to below, we're transforming:
|
// This is pretty similar to below, we're transforming:
|
||||||
|
@ -1635,10 +1945,7 @@ impl<'test> TestCx<'test> {
|
||||||
.args(support_libs)
|
.args(support_libs)
|
||||||
.args(args);
|
.args(args);
|
||||||
|
|
||||||
for key in &self.props.unset_exec_env {
|
prepare_env(&mut test_client);
|
||||||
test_client.env_remove(key);
|
|
||||||
}
|
|
||||||
test_client.envs(env.clone());
|
|
||||||
|
|
||||||
self.compose_and_run(
|
self.compose_and_run(
|
||||||
test_client,
|
test_client,
|
||||||
|
@ -1653,10 +1960,7 @@ impl<'test> TestCx<'test> {
|
||||||
let mut wr_run = Command::new("wr-run");
|
let mut wr_run = Command::new("wr-run");
|
||||||
wr_run.args(&[&prog]).args(args);
|
wr_run.args(&[&prog]).args(args);
|
||||||
|
|
||||||
for key in &self.props.unset_exec_env {
|
prepare_env(&mut wr_run);
|
||||||
wr_run.env_remove(key);
|
|
||||||
}
|
|
||||||
wr_run.envs(env.clone());
|
|
||||||
|
|
||||||
self.compose_and_run(
|
self.compose_and_run(
|
||||||
wr_run,
|
wr_run,
|
||||||
|
@ -1671,10 +1975,7 @@ impl<'test> TestCx<'test> {
|
||||||
let mut program = Command::new(&prog);
|
let mut program = Command::new(&prog);
|
||||||
program.args(args).current_dir(&self.output_base_dir());
|
program.args(args).current_dir(&self.output_base_dir());
|
||||||
|
|
||||||
for key in &self.props.unset_exec_env {
|
prepare_env(&mut program);
|
||||||
program.env_remove(key);
|
|
||||||
}
|
|
||||||
program.envs(env.clone());
|
|
||||||
|
|
||||||
self.compose_and_run(
|
self.compose_and_run(
|
||||||
program,
|
program,
|
||||||
|
@ -1685,7 +1986,7 @@ impl<'test> TestCx<'test> {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
if proc_res.status.success() {
|
if delete_after_success && proc_res.status.success() {
|
||||||
// delete the executable after running it to save space.
|
// delete the executable after running it to save space.
|
||||||
// it is ok if the deletion failed.
|
// it is ok if the deletion failed.
|
||||||
let _ = fs::remove_file(self.make_exe_name());
|
let _ = fs::remove_file(self.make_exe_name());
|
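The `prepare_env` closure introduced above replaces three copies of the same environment bookkeeping. A free-standing sketch of that logic on a plain `Command` (variable names are illustrative):

```rust
use std::process::Command;

// Sketch of the shared environment setup: unset first, then apply the test's
// exec-env pairs, then any caller-provided extras (e.g. LLVM_PROFILE_FILE).
fn prepare_env(
    cmd: &mut Command,
    unset_exec_env: &[&str],
    exec_env: &[(&str, &str)],
    env_extra: &[(&str, &str)],
) {
    for key in unset_exec_env {
        cmd.env_remove(key);
    }
    for (key, val) in exec_env.iter().chain(env_extra) {
        cmd.env(key, val);
    }
}

fn main() {
    let mut cmd = Command::new("my_test");
    prepare_env(
        &mut cmd,
        &["RUST_BACKTRACE"],
        &[("TEST_ENV", "1")],
        &[("LLVM_PROFILE_FILE", "default.profraw")],
    );
    println!("{cmd:?}");
}
```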
||||||
|
@ -1812,6 +2113,7 @@ impl<'test> TestCx<'test> {
|
||||||
|| self.is_vxworks_pure_static()
|
|| self.is_vxworks_pure_static()
|
||||||
|| self.config.target.contains("bpf")
|
|| self.config.target.contains("bpf")
|
||||||
|| !self.config.target_cfg().dynamic_linking
|
|| !self.config.target_cfg().dynamic_linking
|
||||||
|
|| self.config.mode == RunCoverage
|
||||||
{
|
{
|
||||||
// We primarily compile all auxiliary libraries as dynamic libraries
|
// We primarily compile all auxiliary libraries as dynamic libraries
|
||||||
// to avoid code size bloat and large binaries as much as possible
|
// to avoid code size bloat and large binaries as much as possible
|
||||||
|
@ -1822,6 +2124,10 @@ impl<'test> TestCx<'test> {
|
||||||
// dynamic libraries so we just go back to building a normal library. Note,
|
// dynamic libraries so we just go back to building a normal library. Note,
|
||||||
// however, that for MUSL if the library is built with `force_host` then
|
// however, that for MUSL if the library is built with `force_host` then
|
||||||
// it's ok to be a dylib as the host should always support dylibs.
|
// it's ok to be a dylib as the host should always support dylibs.
|
||||||
|
//
|
||||||
|
// Coverage tests want static linking by default so that coverage
|
||||||
|
// mappings in auxiliary libraries can be merged into the final
|
||||||
|
// executable.
|
||||||
(false, Some("lib"))
|
(false, Some("lib"))
|
||||||
} else {
|
} else {
|
||||||
(true, Some("dylib"))
|
(true, Some("dylib"))
|
||||||
|
@ -1999,6 +2305,10 @@ impl<'test> TestCx<'test> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
DebugInfo => { /* debuginfo tests must be unoptimized */ }
|
DebugInfo => { /* debuginfo tests must be unoptimized */ }
|
||||||
|
RunCoverage => {
|
||||||
|
// Coverage reports are affected by optimization level, and
|
||||||
|
// the current snapshots assume no optimization by default.
|
||||||
|
}
|
||||||
_ => {
|
_ => {
|
||||||
rustc.arg("-O");
|
rustc.arg("-O");
|
||||||
}
|
}
|
||||||
|
@ -2065,6 +2375,9 @@ impl<'test> TestCx<'test> {
|
||||||
|
|
||||||
rustc.arg(dir_opt);
|
rustc.arg(dir_opt);
|
||||||
}
|
}
|
||||||
|
RunCoverage => {
|
||||||
|
rustc.arg("-Cinstrument-coverage");
|
||||||
|
}
|
||||||
RunPassValgrind | Pretty | DebugInfo | Codegen | Rustdoc | RustdocJson | RunMake
|
RunPassValgrind | Pretty | DebugInfo | Codegen | Rustdoc | RustdocJson | RunMake
|
||||||
| CodegenUnits | JsDocTest | Assembly => {
|
| CodegenUnits | JsDocTest | Assembly => {
|
||||||
// do not use JSON output
|
// do not use JSON output
|
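Tying together the two rustc-flag hunks above: in run-coverage mode the test is built with instrumentation and without `-O`, since the checked-in reports assume unoptimized counters. An illustrative (not actual compiletest) command construction:

```rust
use std::process::Command;

// Illustration only: compiletest builds this incrementally, not in one helper.
fn coverage_rustc_command(test_file: &str) -> Command {
    let mut rustc = Command::new("rustc");
    rustc.arg("-Cinstrument-coverage");
    // Deliberately no `-O`: coverage snapshots assume unoptimized builds.
    rustc.arg(test_file);
    rustc
}

fn main() {
    let cmd = coverage_rustc_command("tests/run-coverage/example.rs");
    println!("{cmd:?}");
}
```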
||||||
|
|
|
@ -1,4 +1,15 @@
|
||||||
../coverage/doctest.rs:
|
$DIR/auxiliary/doctest_crate.rs:
|
||||||
|
1| |/// A function run only from within doctests
|
||||||
|
2| 3|pub fn fn_run_in_doctests(conditional: usize) {
|
||||||
|
3| 3| match conditional {
|
||||||
|
4| 1| 1 => assert_eq!(1, 1), // this is run,
|
||||||
|
5| 1| 2 => assert_eq!(1, 1), // this,
|
||||||
|
6| 1| 3 => assert_eq!(1, 1), // and this too
|
||||||
|
7| 0| _ => assert_eq!(1, 2), // however this is not
|
||||||
|
8| | }
|
||||||
|
9| 3|}
|
||||||
|
|
||||||
|
$DIR/doctest.rs:
|
||||||
1| |//! This test ensures that code from doctests is properly re-mapped.
|
1| |//! This test ensures that code from doctests is properly re-mapped.
|
||||||
2| |//! See <https://github.com/rust-lang/rust/issues/79417> for more info.
|
2| |//! See <https://github.com/rust-lang/rust/issues/79417> for more info.
|
||||||
3| |//!
|
3| |//!
|
||||||
|
@ -67,7 +78,7 @@
|
||||||
63| |//! doctest_main()
|
63| |//! doctest_main()
|
||||||
64| |//! }
|
64| |//! }
|
||||||
65| |//! ```
|
65| |//! ```
|
||||||
66| |
|
66| |// aux-build:doctest_crate.rs
|
||||||
67| |/// doctest attached to fn testing external code:
|
67| |/// doctest attached to fn testing external code:
|
||||||
68| |/// ```
|
68| |/// ```
|
||||||
69| 1|/// extern crate doctest_crate;
|
69| 1|/// extern crate doctest_crate;
|
||||||
|
@ -102,14 +113,3 @@
|
||||||
98| |// what affect it might have on diagnostic messages from the compiler, and whether anyone would care
|
98| |// what affect it might have on diagnostic messages from the compiler, and whether anyone would care
|
||||||
99| |// if the indentation changed. I don't know if there is a more viable solution.
|
99| |// if the indentation changed. I don't know if there is a more viable solution.
|
||||||
|
|
||||||
../coverage/lib/doctest_crate.rs:
|
|
||||||
1| |/// A function run only from within doctests
|
|
||||||
2| 3|pub fn fn_run_in_doctests(conditional: usize) {
|
|
||||||
3| 3| match conditional {
|
|
||||||
4| 1| 1 => assert_eq!(1, 1), // this is run,
|
|
||||||
5| 1| 2 => assert_eq!(1, 1), // this,
|
|
||||||
6| 1| 3 => assert_eq!(1, 1), // and this too
|
|
||||||
7| 0| _ => assert_eq!(1, 2), // however this is not
|
|
||||||
8| | }
|
|
||||||
9| 3|}
|
|
||||||
|
|
|
@ -63,7 +63,7 @@
|
||||||
//! doctest_main()
|
//! doctest_main()
|
||||||
//! }
|
//! }
|
||||||
//! ```
|
//! ```
|
||||||
|
// aux-build:doctest_crate.rs
|
||||||
/// doctest attached to fn testing external code:
|
/// doctest attached to fn testing external code:
|
||||||
/// ```
|
/// ```
|
||||||
/// extern crate doctest_crate;
|
/// extern crate doctest_crate;
|
|
@ -1,5 +1,5 @@
|
||||||
1| |#![allow(unused_assignments)]
|
1| |#![allow(unused_assignments)]
|
||||||
2| |// expect-exit-status-101
|
2| |// failure-status: 101
|
||||||
3| |
|
3| |
|
||||||
4| 4|fn might_fail_assert(one_plus_one: u32) {
|
4| 4|fn might_fail_assert(one_plus_one: u32) {
|
||||||
5| 4| println!("does 1 + 1 = {}?", one_plus_one);
|
5| 4| println!("does 1 + 1 = {}?", one_plus_one);
|
|
@ -1,5 +1,5 @@
|
||||||
#![allow(unused_assignments)]
|
#![allow(unused_assignments)]
|
||||||
// expect-exit-status-101
|
// failure-status: 101
|
||||||
|
|
||||||
fn might_fail_assert(one_plus_one: u32) {
|
fn might_fail_assert(one_plus_one: u32) {
|
||||||
println!("does 1 + 1 = {}?", one_plus_one);
|
println!("does 1 + 1 = {}?", one_plus_one);
|
|
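The snapshot hunks around here mostly track the directive rename (`expect-exit-status-N` → `failure-status: N`) and the switch to `aux-build`/`$DIR` paths. For orientation, a hypothetical minimal run-coverage test using the renamed directive might look like this (not an actual file from the suite):

```rust
// failure-status: 101

fn main() {
    // The panic makes the process exit with status 101, which the
    // `failure-status` directive above tells compiletest to expect.
    assert_eq!(1 + 1, 3, "deliberate failure for the example");
}
```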
@ -1,6 +1,6 @@
|
||||||
#![allow(unused_assignments, unused_variables)]
|
#![allow(unused_assignments, unused_variables)]
|
||||||
// compile-flags: -C opt-level=3 # validates coverage now works with optimizations
|
// compile-flags: -C opt-level=3
|
||||||
use std::fmt::Debug;
|
use std::fmt::Debug; // ^^ validates coverage now works with optimizations
|
||||||
|
|
||||||
pub fn used_function() {
|
pub fn used_function() {
|
||||||
// Initialize test constants in a way that cannot be determined at compile time, to ensure
|
// Initialize test constants in a way that cannot be determined at compile time, to ensure
|
|
@ -1,7 +1,7 @@
|
||||||
#![allow(unused_assignments, unused_variables)]
|
#![allow(unused_assignments, unused_variables)]
|
||||||
|
|
||||||
// compile-flags: -C opt-level=3 # validates coverage now works with optimizations
|
// compile-flags: -C opt-level=3
|
||||||
|
// ^^ validates coverage now works with optimizations
|
||||||
use std::fmt::Debug;
|
use std::fmt::Debug;
|
||||||
|
|
||||||
pub fn used_function() {
|
pub fn used_function() {
|
|
@ -1,6 +1,6 @@
|
||||||
1| |#![allow(unused_assignments, unused_variables)]
|
1| |#![allow(unused_assignments, unused_variables)]
|
||||||
2| |// compile-flags: -C opt-level=2 # fix described in rustc_middle/mir/mono.rs
|
2| |// compile-flags: -C opt-level=2
|
||||||
3| 1|fn main() {
|
3| 1|fn main() { // ^^ fix described in rustc_middle/mir/mono.rs
|
||||||
4| 1| // Initialize test constants in a way that cannot be determined at compile time, to ensure
|
4| 1| // Initialize test constants in a way that cannot be determined at compile time, to ensure
|
||||||
5| 1| // rustc and LLVM cannot optimize out statements (or coverage counters) downstream from
|
5| 1| // rustc and LLVM cannot optimize out statements (or coverage counters) downstream from
|
||||||
6| 1| // dependent conditions.
|
6| 1| // dependent conditions.
|
|
@ -1,6 +1,6 @@
|
||||||
#![allow(unused_assignments, unused_variables)]
|
#![allow(unused_assignments, unused_variables)]
|
||||||
// compile-flags: -C opt-level=2 # fix described in rustc_middle/mir/mono.rs
|
// compile-flags: -C opt-level=2
|
||||||
fn main() {
|
fn main() { // ^^ fix described in rustc_middle/mir/mono.rs
|
||||||
// Initialize test constants in a way that cannot be determined at compile time, to ensure
|
// Initialize test constants in a way that cannot be determined at compile time, to ensure
|
||||||
// rustc and LLVM cannot optimize out statements (or coverage counters) downstream from
|
// rustc and LLVM cannot optimize out statements (or coverage counters) downstream from
|
||||||
// dependent conditions.
|
// dependent conditions.
|
|
@ -1,5 +1,5 @@
|
||||||
1| |#![allow(unused_assignments)]
|
1| |#![allow(unused_assignments)]
|
||||||
2| |// expect-exit-status-1
|
2| |// failure-status: 1
|
||||||
3| |
|
3| |
|
||||||
4| |struct Firework {
|
4| |struct Firework {
|
||||||
5| | strength: i32,
|
5| | strength: i32,
|
|
@ -1,5 +1,5 @@
|
||||||
#![allow(unused_assignments)]
|
#![allow(unused_assignments)]
|
||||||
// expect-exit-status-1
|
// failure-status: 1
|
||||||
|
|
||||||
struct Firework {
|
struct Firework {
|
||||||
strength: i32,
|
strength: i32,
|
|
@ -1,5 +1,5 @@
|
||||||
1| |#![allow(unused_assignments)]
|
1| |#![allow(unused_assignments)]
|
||||||
2| |// expect-exit-status-1
|
2| |// failure-status: 1
|
||||||
3| |
|
3| |
|
||||||
4| |struct Firework<T> where T: Copy + std::fmt::Display {
|
4| |struct Firework<T> where T: Copy + std::fmt::Display {
|
||||||
5| | strength: T,
|
5| | strength: T,
|
|
@ -1,5 +1,5 @@
|
||||||
#![allow(unused_assignments)]
|
#![allow(unused_assignments)]
|
||||||
// expect-exit-status-1
|
// failure-status: 1
|
||||||
|
|
||||||
struct Firework<T> where T: Copy + std::fmt::Display {
|
struct Firework<T> where T: Copy + std::fmt::Display {
|
||||||
strength: T,
|
strength: T,
|
|
@ -1,6 +1,6 @@
|
||||||
1| |// This demonstrated Issue #84561: function-like macros produce unintuitive coverage results.
|
1| |// This demonstrated Issue #84561: function-like macros produce unintuitive coverage results.
|
||||||
2| |
|
2| |
|
||||||
3| |// expect-exit-status-101
|
3| |// failure-status: 101
|
||||||
4| 21|#[derive(PartialEq, Eq)]
|
4| 21|#[derive(PartialEq, Eq)]
|
||||||
5| |struct Foo(u32);
|
5| |struct Foo(u32);
|
||||||
6| 1|fn test3() {
|
6| 1|fn test3() {
|
|
@ -1,6 +1,6 @@
|
||||||
// This demonstrated Issue #84561: function-like macros produce unintuitive coverage results.
|
// This demonstrated Issue #84561: function-like macros produce unintuitive coverage results.
|
||||||
|
|
||||||
// expect-exit-status-101
|
// failure-status: 101
|
||||||
#[derive(PartialEq, Eq)]
|
#[derive(PartialEq, Eq)]
|
||||||
struct Foo(u32);
|
struct Foo(u32);
|
||||||
fn test3() {
|
fn test3() {
|
|
@ -1,16 +1,4 @@
|
||||||
../coverage/issue-85461.rs:
|
$DIR/auxiliary/inline_always_with_dead_code.rs:
|
||||||
1| |// Regression test for #85461: MSVC sometimes fail to link with dead code and #[inline(always)]
|
|
||||||
2| |
|
|
||||||
3| |extern crate inline_always_with_dead_code;
|
|
||||||
4| |
|
|
||||||
5| |use inline_always_with_dead_code::{bar, baz};
|
|
||||||
6| |
|
|
||||||
7| 1|fn main() {
|
|
||||||
8| 1| bar::call_me();
|
|
||||||
9| 1| baz::call_me();
|
|
||||||
10| 1|}
|
|
||||||
|
|
||||||
../coverage/lib/inline_always_with_dead_code.rs:
|
|
||||||
1| |// compile-flags: -Cinstrument-coverage -Ccodegen-units=4 -Copt-level=0
|
1| |// compile-flags: -Cinstrument-coverage -Ccodegen-units=4 -Copt-level=0
|
||||||
2| |
|
2| |
|
||||||
3| |#![allow(dead_code)]
|
3| |#![allow(dead_code)]
|
||||||
|
@ -34,3 +22,15 @@
|
||||||
21| 1| }
|
21| 1| }
|
||||||
22| |}
|
22| |}
|
||||||
|
|
||||||
|
$DIR/issue-85461.rs:
|
||||||
|
1| |// Regression test for #85461: MSVC sometimes fail to link with dead code and #[inline(always)]
|
||||||
|
2| |// aux-build:inline_always_with_dead_code.rs
|
||||||
|
3| |extern crate inline_always_with_dead_code;
|
||||||
|
4| |
|
||||||
|
5| |use inline_always_with_dead_code::{bar, baz};
|
||||||
|
6| |
|
||||||
|
7| 1|fn main() {
|
||||||
|
8| 1| bar::call_me();
|
||||||
|
9| 1| baz::call_me();
|
||||||
|
10| 1|}
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
// Regression test for #85461: MSVC sometimes fail to link with dead code and #[inline(always)]
|
// Regression test for #85461: MSVC sometimes fail to link with dead code and #[inline(always)]
|
||||||
|
// aux-build:inline_always_with_dead_code.rs
|
||||||
extern crate inline_always_with_dead_code;
|
extern crate inline_always_with_dead_code;
|
||||||
|
|
||||||
use inline_always_with_dead_code::{bar, baz};
|
use inline_always_with_dead_code::{bar, baz};
|
|
@ -1,5 +1,5 @@
|
||||||
1| |#![allow(unused_assignments)]
|
1| |#![allow(unused_assignments)]
|
||||||
2| |// expect-exit-status-101
|
2| |// failure-status: 101
|
||||||
3| |
|
3| |
|
||||||
4| 4|fn might_overflow(to_add: u32) -> u32 {
|
4| 4|fn might_overflow(to_add: u32) -> u32 {
|
||||||
5| 4| if to_add > 5 {
|
5| 4| if to_add > 5 {
|
|
@ -1,5 +1,5 @@
|
||||||
#![allow(unused_assignments)]
|
#![allow(unused_assignments)]
|
||||||
// expect-exit-status-101
|
// failure-status: 101
|
||||||
|
|
||||||
fn might_overflow(to_add: u32) -> u32 {
|
fn might_overflow(to_add: u32) -> u32 {
|
||||||
if to_add > 5 {
|
if to_add > 5 {
|
Some files were not shown because too many files have changed in this diff.