Auto merge of #136905 - matthiaskrgr:rollup-8zwcgta, r=matthiaskrgr
Rollup of 8 pull requests

Successful merges:

- #135549 (Document some safety constraints and use more safe wrappers)
- #135965 (In "specify type" suggestion, skip type params that are already known)
- #136193 (Implement pattern type ffi checks)
- #136646 (Add a TyPat in the AST to reuse the generic arg lowering logic)
- #136874 (Change the issue number for `likely_unlikely` and `cold_path`)
- #136884 (Lower fn items as ZST valtrees and delay a bug)
- #136885 (i686-linux-android: increase CPU baseline to Pentium 4 (without an actual change))
- #136891 (Check sig for errors before checking for unconstrained anonymous lifetime)

r? `@ghost`
`@rustbot` modify labels: rollup
commit 33d92df3e6
57 changed files with 739 additions and 557 deletions
@@ -2249,7 +2249,7 @@ pub enum TyKind {
    CVarArgs,
    /// Pattern types like `pattern_type!(u32 is 1..=)`, which is the same as `NonZero<u32>`,
    /// just as part of the type system.
    Pat(P<Ty>, P<Pat>),
    Pat(P<Ty>, P<TyPat>),
    /// Sometimes we need a dummy value when no error has occurred.
    Dummy,
    /// Placeholder for a kind that has failed to be defined.
@@ -2277,6 +2277,27 @@ impl TyKind {
    }
}

/// A pattern type pattern.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct TyPat {
    pub id: NodeId,
    pub kind: TyPatKind,
    pub span: Span,
    pub tokens: Option<LazyAttrTokenStream>,
}

/// All the different flavors of pattern that Rust recognizes.
//
// Adding a new variant? Please update `test_pat` in `tests/ui/macros/stringify.rs`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum TyPatKind {
    /// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
    Range(Option<P<AnonConst>>, Option<P<AnonConst>>, Spanned<RangeEnd>),

    /// Placeholder for a pattern that wasn't syntactically well formed in some way.
    Err(ErrorGuaranteed),
}

/// Syntax used to declare a trait object.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[repr(u8)]

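For context, a minimal sketch of the surface syntax the new `TyPat`/`TyPatKind` nodes model. This assumes a nightly toolchain with the unstable `pattern_types` and `pattern_type_macro` features and the `core::pat::pattern_type` macro path; the feature is unstable and details may change.

```rust
#![feature(pattern_types, pattern_type_macro)]

use core::pat::pattern_type;

// Base type `u32` restricted by the range pattern `1..`: values are known to
// be non-zero, so the type carries the same niche as `NonZero<u32>`.
type NonZeroU32Pat = pattern_type!(u32 is 1..);

fn main() {
    // The niche lets `Option` wrap the pattern type at no size cost.
    assert_eq!(
        std::mem::size_of::<Option<NonZeroU32Pat>>(),
        std::mem::size_of::<u32>()
    );
}
```
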
@@ -210,6 +210,10 @@ pub trait MutVisitor: Sized {
        walk_ty(self, t);
    }

    fn visit_ty_pat(&mut self, t: &mut P<TyPat>) {
        walk_ty_pat(self, t);
    }

    fn visit_lifetime(&mut self, l: &mut Lifetime) {
        walk_lifetime(self, l);
    }

@@ -570,7 +574,7 @@ pub fn walk_ty<T: MutVisitor>(vis: &mut T, ty: &mut P<Ty>) {
        TyKind::Paren(ty) => vis.visit_ty(ty),
        TyKind::Pat(ty, pat) => {
            vis.visit_ty(ty);
            vis.visit_pat(pat);
            vis.visit_ty_pat(pat);
        }
        TyKind::Path(qself, path) => {
            vis.visit_qself(qself);
@@ -594,6 +598,20 @@ pub fn walk_ty<T: MutVisitor>(vis: &mut T, ty: &mut P<Ty>) {
    vis.visit_span(span);
}

pub fn walk_ty_pat<T: MutVisitor>(vis: &mut T, ty: &mut P<TyPat>) {
    let TyPat { id, kind, span, tokens } = ty.deref_mut();
    vis.visit_id(id);
    match kind {
        TyPatKind::Range(start, end, _include_end) => {
            visit_opt(start, |c| vis.visit_anon_const(c));
            visit_opt(end, |c| vis.visit_anon_const(c));
        }
        TyPatKind::Err(_) => {}
    }
    visit_lazy_tts(vis, tokens);
    vis.visit_span(span);
}

fn walk_foreign_mod<T: MutVisitor>(vis: &mut T, foreign_mod: &mut ForeignMod) {
    let ForeignMod { extern_span: _, safety, abi: _, items } = foreign_mod;
    visit_safety(vis, safety);

@@ -179,6 +179,9 @@ pub trait Visitor<'ast>: Sized {
    fn visit_ty(&mut self, t: &'ast Ty) -> Self::Result {
        walk_ty(self, t)
    }
    fn visit_ty_pat(&mut self, t: &'ast TyPat) -> Self::Result {
        walk_ty_pat(self, t)
    }
    fn visit_generic_param(&mut self, param: &'ast GenericParam) -> Self::Result {
        walk_generic_param(self, param)
    }
@@ -534,7 +537,7 @@ pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) -> V::Result {
        }
        TyKind::Pat(ty, pat) => {
            try_visit!(visitor.visit_ty(ty));
            try_visit!(visitor.visit_pat(pat));
            try_visit!(visitor.visit_ty_pat(pat));
        }
        TyKind::Array(ty, length) => {
            try_visit!(visitor.visit_ty(ty));
@@ -555,6 +558,18 @@ pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) -> V::Result {
    V::Result::output()
}

pub fn walk_ty_pat<'a, V: Visitor<'a>>(visitor: &mut V, tp: &'a TyPat) -> V::Result {
    let TyPat { id: _, kind, span: _, tokens: _ } = tp;
    match kind {
        TyPatKind::Range(start, end, _include_end) => {
            visit_opt!(visitor, visit_anon_const, start);
            visit_opt!(visitor, visit_anon_const, end);
        }
        TyPatKind::Err(_) => {}
    }
    V::Result::output()
}

fn walk_qself<'a, V: Visitor<'a>>(visitor: &mut V, qself: &'a Option<P<QSelf>>) -> V::Result {
    if let Some(qself) = qself {
        let QSelf { ty, path_span: _, position: _ } = &**qself;

@@ -4,10 +4,10 @@ use rustc_ast::ptr::P;
use rustc_ast::*;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def::Res;
use rustc_middle::span_bug;
use rustc_span::source_map::{Spanned, respan};
use rustc_span::{Ident, Span, kw};
use rustc_span::{Ident, Span};

use super::errors::{
    ArbitraryExpressionInPattern, ExtraDoubleDot, MisplacedDoubleDot, SubTupleBinding,
@@ -430,78 +430,20 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
        self.arena.alloc(hir::PatExpr { hir_id: self.lower_node_id(expr.id), span, kind })
    }

    pub(crate) fn lower_ty_pat(&mut self, pattern: &Pat) -> &'hir hir::TyPat<'hir> {
    pub(crate) fn lower_ty_pat(&mut self, pattern: &TyPat) -> &'hir hir::TyPat<'hir> {
        self.arena.alloc(self.lower_ty_pat_mut(pattern))
    }

    fn lower_ty_pat_mut(&mut self, mut pattern: &Pat) -> hir::TyPat<'hir> {
    fn lower_ty_pat_mut(&mut self, pattern: &TyPat) -> hir::TyPat<'hir> {
        // loop here to avoid recursion
        let pat_hir_id = self.lower_node_id(pattern.id);
        let node = loop {
            match &pattern.kind {
                PatKind::Range(e1, e2, Spanned { node: end, .. }) => {
                    // FIXME(pattern_types): remove this closure and call `lower_const_arg` instead.
                    // That requires first modifying the AST to have const args here.
                    let mut lower_expr = |e: &Expr| -> &_ {
                        if let ExprKind::Path(None, path) = &e.kind
                            && let Some(res) = self
                                .resolver
                                .get_partial_res(e.id)
                                .and_then(|partial_res| partial_res.full_res())
                        {
                            self.lower_const_path_to_const_arg(path, res, e.id, e.span)
                        } else {
                            let node_id = self.next_node_id();
                            let def_id = self.create_def(
                                self.current_hir_id_owner.def_id,
                                node_id,
                                kw::Empty,
                                DefKind::AnonConst,
                                e.span,
                            );
                            let hir_id = self.lower_node_id(node_id);
                            let ac = self.arena.alloc(hir::AnonConst {
                                def_id,
                                hir_id,
                                body: self.lower_const_body(pattern.span, Some(e)),
                                span: self.lower_span(pattern.span),
                            });
                            self.arena.alloc(hir::ConstArg {
                                hir_id: self.next_id(),
                                kind: hir::ConstArgKind::Anon(ac),
                            })
                        }
                    };
                    break hir::TyPatKind::Range(
                        e1.as_deref().map(|e| lower_expr(e)),
                        e2.as_deref().map(|e| lower_expr(e)),
                        self.lower_range_end(end, e2.is_some()),
                    );
                }
                // return inner to be processed in next loop
                PatKind::Paren(inner) => pattern = inner,
                PatKind::MacCall(_) => panic!("{:?} shouldn't exist here", pattern.span),
                PatKind::Err(guar) => break hir::TyPatKind::Err(*guar),
                PatKind::Deref(..)
                | PatKind::Box(..)
                | PatKind::Or(..)
                | PatKind::Struct(..)
                | PatKind::TupleStruct(..)
                | PatKind::Tuple(..)
                | PatKind::Ref(..)
                | PatKind::Expr(..)
                | PatKind::Guard(..)
                | PatKind::Slice(_)
                | PatKind::Ident(..)
                | PatKind::Path(..)
                | PatKind::Wild
                | PatKind::Never
                | PatKind::Rest => {
                    break hir::TyPatKind::Err(
                        self.dcx().span_err(pattern.span, "pattern not supported in pattern types"),
                    );
                }
            }
        let node = match &pattern.kind {
            TyPatKind::Range(e1, e2, Spanned { node: end, .. }) => hir::TyPatKind::Range(
                e1.as_deref().map(|e| self.lower_anon_const_to_const_arg(e)),
                e2.as_deref().map(|e| self.lower_anon_const_to_const_arg(e)),
                self.lower_range_end(end, e2.is_some()),
            ),
            TyPatKind::Err(guar) => hir::TyPatKind::Err(*guar),
        };

        hir::TyPat { hir_id: pat_hir_id, kind: node, span: self.lower_span(pattern.span) }

@@ -1148,6 +1148,28 @@ impl<'a> State<'a> {
        }
    }

    pub fn print_ty_pat(&mut self, pat: &ast::TyPat) {
        match &pat.kind {
            rustc_ast::TyPatKind::Range(start, end, include_end) => {
                if let Some(start) = start {
                    self.print_expr_anon_const(start, &[]);
                }
                self.word("..");
                if let Some(end) = end {
                    if let RangeEnd::Included(_) = include_end.node {
                        self.word("=");
                    }
                    self.print_expr_anon_const(end, &[]);
                }
            }
            rustc_ast::TyPatKind::Err(_) => {
                self.popen();
                self.word("/*ERROR*/");
                self.pclose();
            }
        }
    }

    pub fn print_type(&mut self, ty: &ast::Ty) {
        self.maybe_print_comment(ty.span.lo());
        self.ibox(0);
@@ -1252,7 +1274,7 @@ impl<'a> State<'a> {
            ast::TyKind::Pat(ty, pat) => {
                self.print_type(ty);
                self.word(" is ");
                self.print_pat(pat);
                self.print_ty_pat(pat);
            }
        }
        self.end();

@@ -1,6 +1,6 @@
use rustc_ast::ptr::P;
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::{Pat, Ty, ast};
use rustc_ast::{AnonConst, DUMMY_NODE_ID, Ty, TyPat, TyPatKind, ast};
use rustc_errors::PResult;
use rustc_expand::base::{self, DummyResult, ExpandResult, ExtCtxt, MacroExpanderResult};
use rustc_parse::exp;
@@ -21,12 +21,24 @@ pub(crate) fn expand<'cx>(
    ExpandResult::Ready(base::MacEager::ty(cx.ty(sp, ast::TyKind::Pat(ty, pat))))
}

fn parse_pat_ty<'a>(cx: &mut ExtCtxt<'a>, stream: TokenStream) -> PResult<'a, (P<Ty>, P<Pat>)> {
fn parse_pat_ty<'a>(cx: &mut ExtCtxt<'a>, stream: TokenStream) -> PResult<'a, (P<Ty>, P<TyPat>)> {
    let mut parser = cx.new_parser_from_tts(stream);

    let ty = parser.parse_ty()?;
    parser.expect_keyword(exp!(Is))?;
    let pat = parser.parse_pat_no_top_alt(None, None)?;
    let pat = parser.parse_pat_no_top_alt(None, None)?.into_inner();

    let kind = match pat.kind {
        ast::PatKind::Range(start, end, include_end) => TyPatKind::Range(
            start.map(|value| P(AnonConst { id: DUMMY_NODE_ID, value })),
            end.map(|value| P(AnonConst { id: DUMMY_NODE_ID, value })),
            include_end,
        ),
        ast::PatKind::Err(guar) => TyPatKind::Err(guar),
        _ => TyPatKind::Err(cx.dcx().span_err(pat.span, "pattern not supported in pattern types")),
    };

    let pat = P(TyPat { id: pat.id, kind, span: pat.span, tokens: pat.tokens });

    Ok((ty, pat))
}

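A sketch of what the rewritten `parse_pat_ty` accepts at the call site: the macro still parses `<ty> is <pat>`, but only range patterns survive into `TyPatKind::Range`; anything else hits the fallback arm and is reported as "pattern not supported in pattern types". Nightly-only, same unstable features as above; the concrete bounds here are illustrative.

```rust
#![feature(pattern_types, pattern_type_macro)]

use core::pat::pattern_type;

// A bounded, inclusive range over a signed base type lowers to
// `TyPatKind::Range(Some(start), Some(end), Included)`.
type Percent = pattern_type!(i32 is 0..=100);

// By contrast, something like `pattern_type!(u32 is Some(_))` would expand to
// `TyPatKind::Err` together with the diagnostic quoted above.

fn main() {
    // The pattern only restricts the valid range; the size is the base type's.
    assert_eq!(std::mem::size_of::<Percent>(), std::mem::size_of::<i32>());
}
```
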
@@ -81,13 +81,13 @@ pub(crate) unsafe fn codegen(
        llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility()));
        let val = tcx.sess.opts.unstable_opts.oom.should_panic();
        let llval = llvm::LLVMConstInt(i8, val as u64, False);
        llvm::LLVMSetInitializer(ll_g, llval);
        llvm::set_initializer(ll_g, llval);

        let name = NO_ALLOC_SHIM_IS_UNSTABLE;
        let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8);
        llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility()));
        let llval = llvm::LLVMConstInt(i8, 0, False);
        llvm::LLVMSetInitializer(ll_g, llval);
        llvm::set_initializer(ll_g, llval);
    }

    if tcx.sess.opts.debuginfo != DebugInfo::None {

@@ -11,7 +11,7 @@ use rustc_codegen_ssa::back::archive::{
use rustc_session::Session;

use crate::llvm::archive_ro::{ArchiveRO, Child};
use crate::llvm::{self, ArchiveKind};
use crate::llvm::{self, ArchiveKind, last_error};

/// Helper for adding many files to an archive.
#[must_use = "must call build() to finish building the archive"]
@@ -169,6 +169,8 @@ impl<'a> LlvmArchiveBuilder<'a> {
            .unwrap_or_else(|kind| self.sess.dcx().emit_fatal(UnknownArchiveKind { kind }));

        let mut additions = mem::take(&mut self.additions);
        // Values in the `members` list below will contain pointers to the strings allocated here.
        // So they need to get dropped after all elements of `members` get freed.
        let mut strings = Vec::new();
        let mut members = Vec::new();

@@ -229,12 +231,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
                self.sess.target.arch == "arm64ec",
            );
            let ret = if r.into_result().is_err() {
                let err = llvm::LLVMRustGetLastError();
                let msg = if err.is_null() {
                    "failed to write archive".into()
                } else {
                    String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
                };
                let msg = last_error().unwrap_or_else(|| "failed to write archive".into());
                Err(io::Error::new(io::ErrorKind::Other, msg))
            } else {
                Ok(!members.is_empty())

@@ -40,7 +40,7 @@ use crate::errors::{
    WithLlvmError, WriteBytecode,
};
use crate::llvm::diagnostic::OptimizationDiagnosticKind::*;
use crate::llvm::{self, DiagnosticInfo, PassManager};
use crate::llvm::{self, DiagnosticInfo};
use crate::type_::Type;
use crate::{LlvmCodegenBackend, ModuleLlvm, base, common, llvm_util};

@@ -54,7 +54,7 @@ pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> Fatal
fn write_output_file<'ll>(
    dcx: DiagCtxtHandle<'_>,
    target: &'ll llvm::TargetMachine,
    pm: &llvm::PassManager<'ll>,
    no_builtins: bool,
    m: &'ll llvm::Module,
    output: &Path,
    dwo_output: Option<&Path>,
@@ -63,16 +63,19 @@ fn write_output_file<'ll>(
    verify_llvm_ir: bool,
) -> Result<(), FatalError> {
    debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
    unsafe {
        let output_c = path_to_c_string(output);
        let dwo_output_c;
        let dwo_output_ptr = if let Some(dwo_output) = dwo_output {
            dwo_output_c = path_to_c_string(dwo_output);
            dwo_output_c.as_ptr()
        } else {
            std::ptr::null()
        };
        let result = llvm::LLVMRustWriteOutputFile(
    let output_c = path_to_c_string(output);
    let dwo_output_c;
    let dwo_output_ptr = if let Some(dwo_output) = dwo_output {
        dwo_output_c = path_to_c_string(dwo_output);
        dwo_output_c.as_ptr()
    } else {
        std::ptr::null()
    };
    let result = unsafe {
        let pm = llvm::LLVMCreatePassManager();
        llvm::LLVMAddAnalysisPasses(target, pm);
        llvm::LLVMRustAddLibraryInfo(pm, m, no_builtins);
        llvm::LLVMRustWriteOutputFile(
            target,
            pm,
            m,
@@ -80,22 +83,22 @@ fn write_output_file<'ll>(
            dwo_output_ptr,
            file_type,
            verify_llvm_ir,
        );
        )
    };

        // Record artifact sizes for self-profiling
        if result == llvm::LLVMRustResult::Success {
            let artifact_kind = match file_type {
                llvm::FileType::ObjectFile => "object_file",
                llvm::FileType::AssemblyFile => "assembly_file",
            };
            record_artifact_size(self_profiler_ref, artifact_kind, output);
            if let Some(dwo_file) = dwo_output {
                record_artifact_size(self_profiler_ref, "dwo_file", dwo_file);
            }
    // Record artifact sizes for self-profiling
    if result == llvm::LLVMRustResult::Success {
        let artifact_kind = match file_type {
            llvm::FileType::ObjectFile => "object_file",
            llvm::FileType::AssemblyFile => "assembly_file",
        };
        record_artifact_size(self_profiler_ref, artifact_kind, output);
        if let Some(dwo_file) = dwo_output {
            record_artifact_size(self_profiler_ref, "dwo_file", dwo_file);
        }

        result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output }))
    }

    result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output }))
}

pub(crate) fn create_informational_target_machine(
@@ -325,13 +328,17 @@ pub(crate) fn save_temp_bitcode(
    if !cgcx.save_temps {
        return;
    }
    let ext = format!("{name}.bc");
    let cgu = Some(&module.name[..]);
    let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
    write_bitcode_to_file(module, &path)
}

fn write_bitcode_to_file(module: &ModuleCodegen<ModuleLlvm>, path: &Path) {
    unsafe {
        let ext = format!("{name}.bc");
        let cgu = Some(&module.name[..]);
        let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
        let cstr = path_to_c_string(&path);
        let path = path_to_c_string(&path);
        let llmod = module.module_llvm.llmod();
        llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
        llvm::LLVMWriteBitcodeToFile(llmod, path.as_ptr());
    }
}

@@ -676,7 +683,6 @@ pub(crate) unsafe fn optimize(
) -> Result<(), FatalError> {
    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);

    let llmod = module.module_llvm.llmod();
    let llcx = &*module.module_llvm.llcx;
    let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt);

@@ -685,8 +691,7 @@ pub(crate) unsafe fn optimize(

    if config.emit_no_opt_bc {
        let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
        let out = path_to_c_string(&out);
        unsafe { llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()) };
        write_bitcode_to_file(module, &out)
    }

    // FIXME(ZuseZ4): support SanitizeHWAddress and prevent illegal/unsupported opts
@@ -755,31 +760,6 @@ pub(crate) unsafe fn codegen(
        create_msvc_imps(cgcx, llcx, llmod);
    }

    // A codegen-specific pass manager is used to generate object
    // files for an LLVM module.
    //
    // Apparently each of these pass managers is a one-shot kind of
    // thing, so we create a new one for each type of output. The
    // pass manager passed to the closure should be ensured to not
    // escape the closure itself, and the manager should only be
    // used once.
    unsafe fn with_codegen<'ll, F, R>(
        tm: &'ll llvm::TargetMachine,
        llmod: &'ll llvm::Module,
        no_builtins: bool,
        f: F,
    ) -> R
    where
        F: FnOnce(&'ll mut PassManager<'ll>) -> R,
    {
        unsafe {
            let cpm = llvm::LLVMCreatePassManager();
            llvm::LLVMAddAnalysisPasses(tm, cpm);
            llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
            f(cpm)
        }
    }

    // Note that if object files are just LLVM bitcode we write bitcode,
    // copy it to the .o file, and delete the bitcode if it wasn't
    // otherwise requested.
@@ -898,21 +878,17 @@ pub(crate) unsafe fn codegen(
            } else {
                llmod
            };
            unsafe {
                with_codegen(tm, llmod, config.no_builtins, |cpm| {
                    write_output_file(
                        dcx,
                        tm,
                        cpm,
                        llmod,
                        &path,
                        None,
                        llvm::FileType::AssemblyFile,
                        &cgcx.prof,
                        config.verify_llvm_ir,
                    )
                })?;
            }
            write_output_file(
                dcx,
                tm,
                config.no_builtins,
                llmod,
                &path,
                None,
                llvm::FileType::AssemblyFile,
                &cgcx.prof,
                config.verify_llvm_ir,
            )?;
        }

        match config.emit_obj {
@@ -936,21 +912,17 @@ pub(crate) unsafe fn codegen(
                    (_, SplitDwarfKind::Split) => Some(dwo_out.as_path()),
                };

                unsafe {
                    with_codegen(tm, llmod, config.no_builtins, |cpm| {
                        write_output_file(
                            dcx,
                            tm,
                            cpm,
                            llmod,
                            &obj_out,
                            dwo_out,
                            llvm::FileType::ObjectFile,
                            &cgcx.prof,
                            config.verify_llvm_ir,
                        )
                    })?;
                }
                write_output_file(
                    dcx,
                    tm,
                    config.no_builtins,
                    llmod,
                    &obj_out,
                    dwo_out,
                    llvm::FileType::ObjectFile,
                    &cgcx.prof,
                    config.verify_llvm_ir,
                )?;
            }

            EmitObj::Bitcode => {
@@ -1077,24 +1049,18 @@ unsafe fn embed_bitcode(
    {
        // We don't need custom section flags, create LLVM globals.
        let llconst = common::bytes_in_context(llcx, bitcode);
        let llglobal = llvm::LLVMAddGlobal(
            llmod,
            common::val_ty(llconst),
            c"rustc.embedded.module".as_ptr(),
        );
        llvm::LLVMSetInitializer(llglobal, llconst);
        let llglobal =
            llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.module");
        llvm::set_initializer(llglobal, llconst);

        llvm::set_section(llglobal, bitcode_section_name(cgcx));
        llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
        llvm::LLVMSetGlobalConstant(llglobal, llvm::True);

        let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
        let llglobal = llvm::LLVMAddGlobal(
            llmod,
            common::val_ty(llconst),
            c"rustc.embedded.cmdline".as_ptr(),
        );
        llvm::LLVMSetInitializer(llglobal, llconst);
        let llglobal =
            llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.cmdline");
        llvm::set_initializer(llglobal, llconst);
        let section = if cgcx.target_is_like_osx {
            c"__LLVM,__cmdline"
        } else if cgcx.target_is_like_aix {
@@ -1134,31 +1100,29 @@ fn create_msvc_imps(
    // underscores added in front).
    let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };

    unsafe {
        let ptr_ty = Type::ptr_llcx(llcx);
        let globals = base::iter_globals(llmod)
            .filter(|&val| {
                llvm::get_linkage(val) == llvm::Linkage::ExternalLinkage
                    && llvm::LLVMIsDeclaration(val) == 0
            })
            .filter_map(|val| {
                // Exclude some symbols that we know are not Rust symbols.
                let name = llvm::get_value_name(val);
                if ignored(name) { None } else { Some((val, name)) }
            })
            .map(move |(val, name)| {
                let mut imp_name = prefix.as_bytes().to_vec();
                imp_name.extend(name);
                let imp_name = CString::new(imp_name).unwrap();
                (imp_name, val)
            })
            .collect::<Vec<_>>();
    let ptr_ty = Type::ptr_llcx(llcx);
    let globals = base::iter_globals(llmod)
        .filter(|&val| {
            llvm::get_linkage(val) == llvm::Linkage::ExternalLinkage && !llvm::is_declaration(val)
        })
        .filter_map(|val| {
            // Exclude some symbols that we know are not Rust symbols.
            let name = llvm::get_value_name(val);
            if ignored(name) { None } else { Some((val, name)) }
        })
        .map(move |(val, name)| {
            let mut imp_name = prefix.as_bytes().to_vec();
            imp_name.extend(name);
            let imp_name = CString::new(imp_name).unwrap();
            (imp_name, val)
        })
        .collect::<Vec<_>>();

        for (imp_name, val) in globals {
            let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr());
            llvm::LLVMSetInitializer(imp, val);
            llvm::set_linkage(imp, llvm::Linkage::ExternalLinkage);
        }
    for (imp_name, val) in globals {
        let imp = llvm::add_global(llmod, ptr_ty, &imp_name);

        llvm::set_initializer(imp, val);
        llvm::set_linkage(imp, llvm::Linkage::ExternalLinkage);
    }

    // Use this function to exclude certain symbols from `__imp` generation.

@@ -219,8 +219,8 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
        let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| {
            bug!("symbol `{}` is already defined", sym);
        });
        llvm::set_initializer(g, sc);
        unsafe {
            llvm::LLVMSetInitializer(g, sc);
            llvm::LLVMSetGlobalConstant(g, True);
            llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global);
        }

@@ -191,7 +191,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
            })
        });
        llvm::set_linkage(g2, llvm::Linkage::InternalLinkage);
        unsafe { llvm::LLVMSetInitializer(g2, g1) };
        llvm::set_initializer(g2, g1);
        g2
    } else if cx.tcx.sess.target.arch == "x86"
        && common::is_mingw_gnu_toolchain(&cx.tcx.sess.target)
@@ -235,7 +235,7 @@ impl<'ll> CodegenCx<'ll, '_> {
            }
            _ => self.define_private_global(self.val_ty(cv)),
        };
        unsafe { llvm::LLVMSetInitializer(gv, cv) };
        llvm::set_initializer(gv, cv);
        set_global_alignment(self, gv, align);
        llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
        gv
@@ -458,7 +458,7 @@ impl<'ll> CodegenCx<'ll, '_> {
                new_g
            };
            set_global_alignment(self, g, alloc.align);
            llvm::LLVMSetInitializer(g, v);
            llvm::set_initializer(g, v);

            if self.should_assume_dso_local(g, true) {
                llvm::LLVMRustSetDSOLocal(g, true);

@@ -616,12 +616,10 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
    pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
        let array = self.const_array(self.type_ptr(), values);

        unsafe {
            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
            llvm::LLVMSetInitializer(g, array);
            llvm::set_linkage(g, llvm::Linkage::AppendingLinkage);
            llvm::set_section(g, c"llvm.metadata");
        }
        let g = llvm::add_global(self.llmod, self.val_ty(array), name);
        llvm::set_initializer(g, array);
        llvm::set_linkage(g, llvm::Linkage::AppendingLinkage);
        llvm::set_section(g, c"llvm.metadata");
    }
}
impl<'ll> SimpleCx<'ll> {

@@ -73,7 +73,7 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>(
            .define_global(section_var_name, llvm_type)
            .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
        llvm::set_section(section_var, c".debug_gdb_scripts");
        llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
        llvm::set_initializer(section_var, cx.const_bytes(section_contents));
        llvm::LLVMSetGlobalConstant(section_var, llvm::True);
        llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
        llvm::set_linkage(section_var, llvm::Linkage::LinkOnceODRLinkage);

@@ -235,7 +235,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
    /// name.
    pub(crate) fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
        self.get_declared_value(name).and_then(|val| {
            let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
            let declaration = llvm::is_declaration(val);
            if !declaration { Some(val) } else { None }
        })
    }

@@ -824,7 +824,7 @@ fn codegen_msvc_try<'ll>(
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        unsafe { llvm::LLVMSetInitializer(tydesc, type_info) };
        llvm::set_initializer(tydesc, type_info);

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because

@@ -2359,7 +2359,7 @@ unsafe extern "C" {
    );
    pub fn LLVMRustWriteOutputFile<'a>(
        T: &'a TargetMachine,
        PM: &PassManager<'a>,
        PM: *mut PassManager<'a>,
        M: &'a Module,
        Output: *const c_char,
        DwoOutput: *const c_char,

@@ -241,6 +241,10 @@ pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
    }
}

pub fn is_declaration(llglobal: &Value) -> bool {
    unsafe { LLVMIsDeclaration(llglobal) == ffi::True }
}

pub fn get_visibility(llglobal: &Value) -> Visibility {
    unsafe { LLVMGetVisibility(llglobal) }.to_rust()
}

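The wrappers above (`set_initializer`, `is_declaration`, and friends) all follow the same shape: the raw FFI binding stays unsafe, and a thin safe function owns the single `unsafe` block and documents the safety argument once instead of at every call site. A self-contained sketch of that pattern, using a plain raw-pointer read instead of an LLVM call so it compiles on its own:

```rust
// Illustrative stand-in for an unsafe primitive such as `LLVMIsDeclaration`.
fn first_byte(bytes: &[u8]) -> Option<u8> {
    if bytes.is_empty() {
        return None;
    }
    // SAFETY: the emptiness check above guarantees `as_ptr()` points at
    // at least one initialized byte.
    Some(unsafe { *bytes.as_ptr() })
}

fn main() {
    assert_eq!(first_byte(b"abc"), Some(b'a'));
    assert_eq!(first_byte(b""), None);
}
```
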
@@ -2154,11 +2154,15 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
                span_bug!(span, "use of bare `static` ConstArgKind::Path's not yet supported")
            }
            // FIXME(const_generics): create real const to allow fn items as const paths
            Res::Def(DefKind::Fn | DefKind::AssocFn, _) => ty::Const::new_error_with_message(
                tcx,
                span,
                "fn items cannot be used as const args",
            ),
            Res::Def(DefKind::Fn | DefKind::AssocFn, did) => {
                self.dcx().span_delayed_bug(span, "function items cannot be used as const args");
                let args = self.lower_generic_args_of_path_segment(
                    span,
                    did,
                    path.segments.last().unwrap(),
                );
                ty::Const::new_value(tcx, ty::ValTree::zst(), Ty::new_fn_def(tcx, did, args))
            }

            // Exhaustive match to be clear about what exactly we're considering to be
            // an invalid Res for a const path.
@@ -2557,27 +2561,29 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ {
        // reject function types that violate cmse ABI requirements
        cmse::validate_cmse_abi(self.tcx(), self.dcx(), hir_id, abi, bare_fn_ty);

        // Find any late-bound regions declared in return type that do
        // not appear in the arguments. These are not well-formed.
        //
        // Example:
        // for<'a> fn() -> &'a str <-- 'a is bad
        // for<'a> fn(&'a String) -> &'a str <-- 'a is ok
        let inputs = bare_fn_ty.inputs();
        let late_bound_in_args =
            tcx.collect_constrained_late_bound_regions(inputs.map_bound(|i| i.to_owned()));
        let output = bare_fn_ty.output();
        let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(output);
        if !bare_fn_ty.references_error() {
            // Find any late-bound regions declared in return type that do
            // not appear in the arguments. These are not well-formed.
            //
            // Example:
            // for<'a> fn() -> &'a str <-- 'a is bad
            // for<'a> fn(&'a String) -> &'a str <-- 'a is ok
            let inputs = bare_fn_ty.inputs();
            let late_bound_in_args =
                tcx.collect_constrained_late_bound_regions(inputs.map_bound(|i| i.to_owned()));
            let output = bare_fn_ty.output();
            let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(output);

        self.validate_late_bound_regions(late_bound_in_args, late_bound_in_ret, |br_name| {
            struct_span_code_err!(
                self.dcx(),
                decl.output.span(),
                E0581,
                "return type references {}, which is not constrained by the fn input types",
                br_name
            )
        });
            self.validate_late_bound_regions(late_bound_in_args, late_bound_in_ret, |br_name| {
                struct_span_code_err!(
                    self.dcx(),
                    decl.output.span(),
                    E0581,
                    "return type references {}, which is not constrained by the fn input types",
                    br_name
                )
            });
        }

        bare_fn_ty
    }

@@ -390,9 +390,6 @@ lint_improper_ctypes_only_phantomdata = composed only of `PhantomData`

lint_improper_ctypes_opaque = opaque types have no C equivalent

lint_improper_ctypes_pat_help = consider using the base type instead

lint_improper_ctypes_pat_reason = pattern types have no C equivalent
lint_improper_ctypes_slice_help = consider using a raw pointer instead

lint_improper_ctypes_slice_reason = slices have no C equivalent

@@ -241,10 +241,7 @@ fn structurally_same_type_impl<'tcx>(
        if let ty::Adt(def, args) = *ty.kind() {
            let is_transparent = def.repr().transparent();
            let is_non_null = types::nonnull_optimization_guaranteed(tcx, def);
            debug!(
                "non_transparent_ty({:?}) -- type is transparent? {}, type is non-null? {}",
                ty, is_transparent, is_non_null
            );
            debug!(?ty, is_transparent, is_non_null);
            if is_transparent && !is_non_null {
                debug_assert_eq!(def.variants().len(), 1);
                let v = &def.variant(FIRST_VARIANT);
@@ -378,14 +375,14 @@ fn structurally_same_type_impl<'tcx>(

            // An Adt and a primitive or pointer type. This can be FFI-safe if non-null
            // enum layout optimisation is being applied.
            (Adt(..), _) if is_primitive_or_pointer(b) => {
            (Adt(..) | Pat(..), _) if is_primitive_or_pointer(b) => {
                if let Some(a_inner) = types::repr_nullable_ptr(tcx, typing_env, a, ckind) {
                    a_inner == b
                } else {
                    false
                }
            }
            (_, Adt(..)) if is_primitive_or_pointer(a) => {
            (_, Adt(..) | Pat(..)) if is_primitive_or_pointer(a) => {
                if let Some(b_inner) = types::repr_nullable_ptr(tcx, typing_env, b, ckind) {
                    b_inner == a
                } else {

@@ -33,6 +33,7 @@
#![feature(let_chains)]
#![feature(rustc_attrs)]
#![feature(rustdoc_internals)]
#![feature(try_blocks)]
#![warn(unreachable_pub)]
// tidy-alphabetical-end

@@ -877,6 +877,37 @@ fn ty_is_known_nonnull<'tcx>(
                .filter_map(|variant| transparent_newtype_field(tcx, variant))
                .any(|field| ty_is_known_nonnull(tcx, typing_env, field.ty(tcx, args), mode))
        }
        ty::Pat(base, pat) => {
            ty_is_known_nonnull(tcx, typing_env, *base, mode)
                || Option::unwrap_or_default(
                    try {
                        match **pat {
                            ty::PatternKind::Range { start, end, include_end } => {
                                match (start, end) {
                                    (Some(start), None) => {
                                        start.try_to_value()?.try_to_bits(tcx, typing_env)? > 0
                                    }
                                    (Some(start), Some(end)) => {
                                        let start =
                                            start.try_to_value()?.try_to_bits(tcx, typing_env)?;
                                        let end =
                                            end.try_to_value()?.try_to_bits(tcx, typing_env)?;

                                        if include_end {
                                            // This also works for negative numbers, as we just need
                                            // to ensure we aren't wrapping over zero.
                                            start > 0 && end >= start
                                        } else {
                                            start > 0 && end > start
                                        }
                                    }
                                    _ => false,
                                }
                            }
                        }
                    },
                )
        }
        _ => false,
    }
}
@@ -907,9 +938,8 @@ fn get_nullable_type<'tcx>(
            };
            return get_nullable_type(tcx, typing_env, inner_field_ty);
        }
        ty::Int(ty) => Ty::new_int(tcx, ty),
        ty::Uint(ty) => Ty::new_uint(tcx, ty),
        ty::RawPtr(ty, mutbl) => Ty::new_ptr(tcx, ty, mutbl),
        ty::Pat(base, ..) => return get_nullable_type(tcx, typing_env, base),
        ty::Int(_) | ty::Uint(_) | ty::RawPtr(..) => ty,
        // As these types are always non-null, the nullable equivalent of
        // `Option<T>` of these types are their raw pointer counterparts.
        ty::Ref(_region, ty, mutbl) => Ty::new_ptr(tcx, ty, mutbl),
@@ -965,63 +995,69 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
    ckind: CItemKind,
) -> Option<Ty<'tcx>> {
    debug!("is_repr_nullable_ptr(tcx, ty = {:?})", ty);
    if let ty::Adt(ty_def, args) = ty.kind() {
        let field_ty = match &ty_def.variants().raw[..] {
            [var_one, var_two] => match (&var_one.fields.raw[..], &var_two.fields.raw[..]) {
                ([], [field]) | ([field], []) => field.ty(tcx, args),
                ([field1], [field2]) => {
                    let ty1 = field1.ty(tcx, args);
                    let ty2 = field2.ty(tcx, args);
    match ty.kind() {
        ty::Adt(ty_def, args) => {
            let field_ty = match &ty_def.variants().raw[..] {
                [var_one, var_two] => match (&var_one.fields.raw[..], &var_two.fields.raw[..]) {
                    ([], [field]) | ([field], []) => field.ty(tcx, args),
                    ([field1], [field2]) => {
                        let ty1 = field1.ty(tcx, args);
                        let ty2 = field2.ty(tcx, args);

                    if is_niche_optimization_candidate(tcx, typing_env, ty1) {
                        ty2
                    } else if is_niche_optimization_candidate(tcx, typing_env, ty2) {
                        ty1
                    } else {
                        return None;
                        if is_niche_optimization_candidate(tcx, typing_env, ty1) {
                            ty2
                        } else if is_niche_optimization_candidate(tcx, typing_env, ty2) {
                            ty1
                        } else {
                            return None;
                        }
                    }
                }
                _ => return None,
            },
            _ => return None,
        },
        _ => return None,
    };

    if !ty_is_known_nonnull(tcx, typing_env, field_ty, ckind) {
        return None;
    }

    // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
    // If the computed size for the field and the enum are different, the nonnull optimization isn't
    // being applied (and we've got a problem somewhere).
    let compute_size_skeleton = |t| SizeSkeleton::compute(t, tcx, typing_env).ok();
    if !compute_size_skeleton(ty)?.same_size(compute_size_skeleton(field_ty)?) {
        bug!("improper_ctypes: Option nonnull optimization not applied?");
    }

    // Return the nullable type this Option-like enum can be safely represented with.
    let field_ty_layout = tcx.layout_of(typing_env.as_query_input(field_ty));
    if field_ty_layout.is_err() && !field_ty.has_non_region_param() {
        bug!("should be able to compute the layout of non-polymorphic type");
    }

    let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
    if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
        match field_ty_scalar.valid_range(&tcx) {
            WrappingRange { start: 0, end }
                if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
            {
                return Some(get_nullable_type(tcx, typing_env, field_ty).unwrap());
            }
            WrappingRange { start: 1, .. } => {
                return Some(get_nullable_type(tcx, typing_env, field_ty).unwrap());
            }
            WrappingRange { start, end } => {
                unreachable!("Unhandled start and end range: ({}, {})", start, end)
            }
        };

            if !ty_is_known_nonnull(tcx, typing_env, field_ty, ckind) {
                return None;
            }

            // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
            // If the computed size for the field and the enum are different, the nonnull optimization isn't
            // being applied (and we've got a problem somewhere).
            let compute_size_skeleton = |t| SizeSkeleton::compute(t, tcx, typing_env).ok();
            if !compute_size_skeleton(ty)?.same_size(compute_size_skeleton(field_ty)?) {
                bug!("improper_ctypes: Option nonnull optimization not applied?");
            }

            // Return the nullable type this Option-like enum can be safely represented with.
            let field_ty_layout = tcx.layout_of(typing_env.as_query_input(field_ty));
            if field_ty_layout.is_err() && !field_ty.has_non_region_param() {
                bug!("should be able to compute the layout of non-polymorphic type");
            }

            let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
            if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
                match field_ty_scalar.valid_range(&tcx) {
                    WrappingRange { start: 0, end }
                        if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
                    {
                        return Some(get_nullable_type(tcx, typing_env, field_ty).unwrap());
                    }
                    WrappingRange { start: 1, .. } => {
                        return Some(get_nullable_type(tcx, typing_env, field_ty).unwrap());
                    }
                    WrappingRange { start, end } => {
                        unreachable!("Unhandled start and end range: ({}, {})", start, end)
                    }
                };
            }
            None
        }
        ty::Pat(base, pat) => match **pat {
            ty::PatternKind::Range { .. } => get_nullable_type(tcx, typing_env, *base),
        },
        _ => None,
    }
    None
}
@@ -1256,11 +1292,9 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
                help: Some(fluent::lint_improper_ctypes_char_help),
            },

            ty::Pat(..) => FfiUnsafe {
                ty,
                reason: fluent::lint_improper_ctypes_pat_reason,
                help: Some(fluent::lint_improper_ctypes_pat_help),
            },
            // It's just extra invariants on the type that you need to uphold,
            // but only the base type is relevant for being representable in FFI.
            ty::Pat(base, ..) => self.check_type_for_ffi(acc, base),

            ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => {
                FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_128bit, help: None }

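The new `ty::Pat` arm of `ty_is_known_nonnull` boils down to a range check over the pattern's bounds, which the real code reads out of valtrees as raw bit values. Restated as a standalone sketch over `u128` bits, with illustrative names:

```rust
/// Returns true when a `start..end` / `start..=end` / `start..` pattern can
/// never contain zero, mirroring the match in `ty_is_known_nonnull`.
fn range_is_known_nonzero(start: Option<u128>, end: Option<u128>, include_end: bool) -> bool {
    match (start, end) {
        // `1..` and anything above: never zero.
        (Some(start), None) => start > 0,
        // Bounded range: must start above zero and must not wrap back around
        // to zero (this also handles negative starts, whose bit patterns are
        // large unsigned values).
        (Some(start), Some(end)) => {
            if include_end { start > 0 && end >= start } else { start > 0 && end > start }
        }
        // `..end` or an unbounded pattern gives no such guarantee.
        _ => false,
    }
}

fn main() {
    assert!(range_is_known_nonzero(Some(1), None, false)); // 1..
    assert!(range_is_known_nonzero(Some(1), Some(255), true)); // 1..=255
    assert!(!range_is_known_nonzero(Some(0), Some(10), true)); // 0..=10 contains zero
    assert!(!range_is_known_nonzero(None, Some(10), false)); // ..10 contains zero
}
```
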
@@ -1800,7 +1800,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
        }

        let u8_type = self.tcx().types.u8;
        match (cv.valtree, cv.ty.kind()) {
        match (cv.valtree, *cv.ty.kind()) {
            (ty::ValTree::Branch(_), ty::Ref(_, inner_ty, _)) => match inner_ty.kind() {
                ty::Slice(t) if *t == u8_type => {
                    let bytes = cv.try_to_raw_bytes(self.tcx()).unwrap_or_else(|| {
@@ -1820,13 +1820,13 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
                    return Ok(());
                }
                _ => {
                    let cv = ty::Value { valtree: cv.valtree, ty: *inner_ty };
                    let cv = ty::Value { valtree: cv.valtree, ty: inner_ty };
                    p!("&");
                    p!(pretty_print_const_valtree(cv, print_ty));
                    return Ok(());
                }
            },
            (ty::ValTree::Branch(_), ty::Array(t, _)) if *t == u8_type => {
            (ty::ValTree::Branch(_), ty::Array(t, _)) if t == u8_type => {
                let bytes = cv.try_to_raw_bytes(self.tcx()).unwrap_or_else(|| {
                    bug!("expected to convert valtree to raw bytes for type {:?}", t)
                });
@@ -1893,11 +1893,16 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
            }
            (ty::ValTree::Leaf(leaf), ty::Ref(_, inner_ty, _)) => {
                p!(write("&"));
                return self.pretty_print_const_scalar_int(leaf, *inner_ty, print_ty);
                return self.pretty_print_const_scalar_int(leaf, inner_ty, print_ty);
            }
            (ty::ValTree::Leaf(leaf), _) => {
                return self.pretty_print_const_scalar_int(leaf, cv.ty, print_ty);
            }
            (_, ty::FnDef(def_id, args)) => {
                // Never allowed today, but we still encounter them in invalid const args.
                p!(print_value_path(def_id, args));
                return Ok(());
            }
            // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
            // their fields instead of just dumping the memory.
            _ => {}

@@ -923,6 +923,21 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r
        self.diag_metadata.current_trait_object = prev;
        self.diag_metadata.current_type_path = prev_ty;
    }

    fn visit_ty_pat(&mut self, t: &'ast TyPat) -> Self::Result {
        match &t.kind {
            TyPatKind::Range(start, end, _) => {
                if let Some(start) = start {
                    self.resolve_anon_const(start, AnonConstKind::ConstArg(IsRepeatExpr::No));
                }
                if let Some(end) = end {
                    self.resolve_anon_const(end, AnonConstKind::ConstArg(IsRepeatExpr::No));
                }
            }
            TyPatKind::Err(_) => {}
        }
    }

    fn visit_poly_trait_ref(&mut self, tref: &'ast PolyTraitRef) {
        let span = tref.span.shrink_to_lo().to(tref.trait_ref.path.span.shrink_to_lo());
        self.with_generic_param_rib(

@@ -9,7 +9,7 @@ pub(crate) fn target() -> Target {
    base.max_atomic_width = Some(64);

    // https://developer.android.com/ndk/guides/abis.html#x86
    base.cpu = "pentiumpro".into();
    base.cpu = "pentium4".into();
    base.features = "+mmx,+sse,+sse2,+sse3,+ssse3".into();
    base.stack_probes = StackProbeType::Inline;

@@ -18,6 +18,8 @@ use rustc_middle::ty::{
    TypeFoldable, TypeFolder, TypeSuperFoldable, TypeckResults,
};
use rustc_span::{BytePos, DUMMY_SP, FileName, Ident, Span, sym};
use rustc_type_ir::inherent::*;
use rustc_type_ir::visit::TypeVisitableExt;
use tracing::{debug, instrument, warn};

use super::nice_region_error::placeholder_error::Highlighted;
@@ -155,27 +157,92 @@ impl UnderspecifiedArgKind {
    }
}

struct ClosureEraser<'tcx> {
    tcx: TyCtxt<'tcx>,
struct ClosureEraser<'a, 'tcx> {
    infcx: &'a InferCtxt<'tcx>,
}

impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ClosureEraser<'tcx> {
impl<'a, 'tcx> ClosureEraser<'a, 'tcx> {
    fn new_infer(&mut self) -> Ty<'tcx> {
        self.infcx.next_ty_var(DUMMY_SP)
    }
}

impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for ClosureEraser<'a, 'tcx> {
    fn cx(&self) -> TyCtxt<'tcx> {
        self.tcx
        self.infcx.tcx
    }

    fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
        match ty.kind() {
            ty::Closure(_, args) => {
                // For a closure type, we turn it into a function pointer so that it gets rendered
                // as `fn(args) -> Ret`.
                let closure_sig = args.as_closure().sig();
                Ty::new_fn_ptr(
                    self.tcx,
                    self.tcx.signature_unclosure(closure_sig, hir::Safety::Safe),
                    self.cx(),
                    self.cx().signature_unclosure(closure_sig, hir::Safety::Safe),
                )
            }
            _ => ty.super_fold_with(self),
            ty::Adt(_, args) if !args.iter().any(|a| a.has_infer()) => {
                // We have a type that doesn't have any inference variables, so we replace
                // the whole thing with `_`. The type system already knows about this type in
                // its entirety and it is redundant to specify it for the user. The user only
                // needs to specify the type parameters that we *couldn't* figure out.
                self.new_infer()
            }
            ty::Adt(def, args) => {
                let generics = self.cx().generics_of(def.did());
                let generics: Vec<bool> = generics
                    .own_params
                    .iter()
                    .map(|param| param.default_value(self.cx()).is_some())
                    .collect();
                let ty = Ty::new_adt(
                    self.cx(),
                    *def,
                    self.cx().mk_args_from_iter(generics.into_iter().zip(args.iter()).map(
                        |(has_default, arg)| {
                            if arg.has_infer() {
                                // This param has an unsubstituted type variable, meaning that this
                                // type has a (potentially deeply nested) type parameter from the
                                // corresponding type's definition. We have explicitly asked this
                                // type to not be hidden. In either case, we keep the type and don't
                                // substitute with `_` just yet.
                                arg.fold_with(self)
                            } else if has_default {
                                // We have a type param that has a default type, like the allocator
                                // in Vec. We decided to show `Vec` itself, because it hasn't yet
                                // been replaced by an `_` `Infer`, but we want to ensure that the
                                // type parameter with default types does *not* get replaced with
                                // `_` because then we'd end up with `Vec<_, _>`, instead of
                                // `Vec<_>`.
                                arg
                            } else if let GenericArgKind::Type(_) = arg.kind() {
                                // We don't replace lifetime or const params, only type params.
                                self.new_infer().into()
                            } else {
                                arg.fold_with(self)
                            }
                        },
                    )),
                );
                ty
            }
            _ if ty.has_infer() => {
                // This type has a (potentially nested) type parameter that we couldn't figure out.
                // We will print this depth of type, so at least the type name and at least one of
                // its type parameters.
                ty.super_fold_with(self)
            }
            // We don't have an unknown type parameter anywhere, replace with `_`.
            _ => self.new_infer(),
        }
    }

    fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
        // Avoid accidentally erasing the type of the const.
        c
    }
}

fn fmt_printer<'a, 'tcx>(infcx: &'a InferCtxt<'tcx>, ns: Namespace) -> FmtPrinter<'a, 'tcx> {
@@ -219,9 +286,9 @@ fn ty_to_string<'tcx>(
) -> String {
    let mut printer = fmt_printer(infcx, Namespace::TypeNS);
    let ty = infcx.resolve_vars_if_possible(ty);
    // We use `fn` ptr syntax for closures, but this only works when the closure
    // does not capture anything.
    let ty = ty.fold_with(&mut ClosureEraser { tcx: infcx.tcx });
    // We use `fn` ptr syntax for closures, but this only works when the closure does not capture
    // anything. We also remove all type parameters that are fully known to the type system.
    let ty = ty.fold_with(&mut ClosureEraser { infcx });

    match (ty.kind(), called_method_def_id) {
        // We don't want the regular output for `fn`s because it includes its path in
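A sketch of the user-facing effect of `ClosureEraser`: when rustc asks the user to specify a type, closure types are rendered as `fn` pointers and type parameters the compiler already knows are folded away to `_`, so the suggestion only spells out what inference genuinely could not determine. The snippet below compiles once the (sketched) suggested annotation is applied; the exact diagnostic and suggestion wording here is illustrative, not quoted from rustc.

```rust
fn main() {
    let nums = [1_i64, 2, 3];
    // Without the annotation, `collect` cannot pick a collection type and
    // rustc reports "type annotations needed", suggesting something like
    // `Vec<_>` for `totals` rather than a fully spelled-out type.
    let totals: Vec<_> = nums.iter().map(|n| n * 2).collect();
    assert_eq!(totals, vec![2, 4, 6]);
}
```
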