
Auto merge of #45380 - dotdash:arg_copies, r=arielb1

Avoid unnecessary copies of arguments that are simple bindings

Initially, MIR differentiated between arguments and locals, which made it
necessary to insert extra copies assigning each argument to a local, even
for simple bindings. That differentiation no longer exists, but we still
create those copies, bloating both the MIR and the LLVM IR we emit.
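
To make the "extra copies" concrete, here is a small, purely illustrative sketch (the function and the exact MIR local numbering are assumptions, not taken from the patch): a by-value argument bound by a simple binding used to be copied into a fresh local before use.

// Illustrative only: a simple binding of a by-value argument.
fn square(x: u32) -> u32 {
    x * x
}

// Before this change, the generated MIR contained an extra argument copy
// along the lines of (local numbering is made up for illustration):
//
//     _2 = _1;            // copy the incoming argument into a new local "x"
//     _0 = Mul(_2, _2);
//
// Afterwards, the binding "x" maps directly to the argument local _1, and
// the "_2 = _1;" copy disappears together with the shadowing debug info.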

Additionally, the current approach creates debug info for the incoming
argument (marking it as an argument) and then immediately shadows it with
a local that goes by the same name. This can be confusing, e.g. when using
"info args" in gdb, or when a debugger with a GUI displays the function
arguments separately from the local variables. It is especially confusing
for mutable bindings, because the argument keeps its initial value while
the local variable changes.
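
The debugger confusion is easiest to see with a mutable binding; the sketch below is illustrative only (the function name and body are assumptions): under the old lowering, debug info exposed both the unchanging incoming argument and a same-named mutable local that shadowed it.

// Illustrative only: a mutable simple binding.
fn countdown(mut n: u32) -> u32 {
    while n > 0 {
        n -= 1; // only the shadowing local changed; the raw argument kept its initial value
    }
    n
}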
bors 2017-10-26 14:14:15 +00:00
commit b0b80f8c22
21 changed files with 145 additions and 122 deletions


@@ -650,7 +650,9 @@ pub enum TerminatorKind<'tcx> {
     Call {
         /// The function that's being called
         func: Operand<'tcx>,
-        /// Arguments the function is called with
+        /// Arguments the function is called with. These are owned by the callee, which is free to
+        /// modify them. This is important as "by-value" arguments might be passed by-reference at
+        /// the ABI level.
        args: Vec<Operand<'tcx>>,
         /// Destination for the return value. If some, the call is converging.
         destination: Option<(Lvalue<'tcx>, BasicBlock)>,
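
A hedged illustration of the ownership rule the new doc comment states (the Payload type and functions below are made up for this example): because a by-value argument may be passed by reference at the ABI level, the caller has to hand the callee a fresh temporary so that in-place modification by the callee cannot be observed through the caller's value.

// Illustrative only: why MIR call arguments must be caller-made temporaries.
#[derive(Clone, Copy)]
struct Payload {
    data: [u64; 16], // large enough that an ABI may pass it by reference
}

fn callee(mut arg: Payload) {
    // The callee owns its argument and may modify it in place.
    arg.data[0] = 42;
}

fn caller() {
    let original = Payload { data: [0; 16] };
    // The call must operate on a copy (a fresh temporary); if the ABI passed
    // Payload by pointer and no copy were made, the write in callee() would
    // clobber `original`.
    callee(original);
    assert_eq!(original.data[0], 0);
}

fn main() {
    caller();
}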


@@ -247,7 +247,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         } else {
             let args: Vec<_> =
                 args.into_iter()
-                    .map(|arg| unpack!(block = this.as_local_operand(block, arg)))
+                    .map(|arg| {
+                        let scope = this.local_scope();
+                        // Function arguments are owned by the callee, so we need as_temp()
+                        // instead of as_operand() to enforce copies
+                        let operand = unpack!(block = this.as_temp(block, scope, arg));
+                        Operand::Consume(Lvalue::Local(operand))
+                    })
                     .collect();

             let success = this.cfg.start_new_block();


@@ -21,6 +21,7 @@ use rustc::mir::visit::{MutVisitor, Lookup};
 use rustc::ty::{self, Ty, TyCtxt};
 use rustc::ty::subst::Substs;
 use rustc::util::nodemap::NodeMap;
+use rustc_const_eval::pattern::{BindingMode, PatternKind};
 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
 use shim;
 use std::mem;
@@ -571,13 +572,24 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         // Bind the argument patterns
         for (index, &(ty, pattern)) in arguments.iter().enumerate() {
             // Function arguments always get the first Local indices after the return pointer
-            let lvalue = Lvalue::Local(Local::new(index + 1));
+            let local = Local::new(index + 1);
+            let lvalue = Lvalue::Local(local);

             if let Some(pattern) = pattern {
                 let pattern = self.hir.pattern_from_hir(pattern);
-                scope = self.declare_bindings(scope, ast_body.span,
-                                              LintLevel::Inherited, &pattern);
-                unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue));
+
+                match *pattern.kind {
+                    // Don't introduce extra copies for simple bindings
+                    PatternKind::Binding { mutability, var, mode: BindingMode::ByValue, .. } => {
+                        self.local_decls[local].mutability = mutability;
+                        self.var_indices.insert(var, local);
+                    }
+                    _ => {
+                        scope = self.declare_bindings(scope, ast_body.span,
+                                                      LintLevel::Inherited, &pattern);
+                        unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue));
+                    }
+                }
             }

             // Make sure we drop (parts of) the argument even when not matched on.


@@ -112,6 +112,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }

+    pub fn set_value_name(&self, value: ValueRef, name: &str) {
+        let cname = CString::new(name.as_bytes()).unwrap();
+        unsafe {
+            llvm::LLVMSetValueName(value, cname.as_ptr());
+        }
+    }
+
     pub fn position_before(&self, insn: ValueRef) {
         unsafe {
             llvm::LLVMPositionBuilderBefore(self.llbuilder, insn);


@@ -64,11 +64,18 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
 impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
     fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> {
-        LocalAnalyzer {
+        let mut analyzer = LocalAnalyzer {
             cx: mircx,
             lvalue_locals: BitVector::new(mircx.mir.local_decls.len()),
             seen_assigned: BitVector::new(mircx.mir.local_decls.len())
-        }
+        };
+
+        // Arguments get assigned to by means of the function being called
+        for idx in 0..mircx.mir.arg_count {
+            analyzer.seen_assigned.insert(idx + 1);
+        }
+
+        analyzer
     }

     fn mark_as_lvalue(&mut self, local: mir::Local) {


@@ -386,6 +386,12 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         let arg_decl = &mir.local_decls[local];
         let arg_ty = mircx.monomorphize(&arg_decl.ty);

+        let name = if let Some(name) = arg_decl.name {
+            name.as_str().to_string()
+        } else {
+            format!("arg{}", arg_index)
+        };
+
         if Some(local) == mir.spread_arg {
             // This argument (e.g. the last argument in the "rust-call" ABI)
             // is a tuple that was spread at the ABI level and now we have
@@ -397,7 +403,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 _ => bug!("spread argument isn't a tuple?!")
             };

-            let lvalue = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
+            let lvalue = LvalueRef::alloca(bcx, arg_ty, &name);
             for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
                 let (dst, _) = lvalue.trans_field_ptr(bcx, i);
                 let arg = &mircx.fn_ty.args[idx];
@@ -444,6 +450,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     llarg_idx += 1;
                 }
                 let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
+                bcx.set_value_name(llarg, &name);
                 llarg_idx += 1;
                 llarg
             } else if !lvalue_locals.contains(local.index()) &&
@@ -481,10 +488,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee);

                     let llarg = bcx.pointercast(llarg, data_llty.ptr_to());
+                    bcx.set_value_name(llarg, &(name.clone() + ".ptr"));
                     let llmeta = bcx.pointercast(llmeta, meta_llty);
+                    bcx.set_value_name(llmeta, &(name + ".meta"));

                     OperandValue::Pair(llarg, llmeta)
                 } else {
+                    bcx.set_value_name(llarg, &name);
                     OperandValue::Immediate(llarg)
                 };
                 let operand = OperandRef {
@@ -493,7 +503,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 };
                 return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
             } else {
-                let lltemp = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
+                let lltemp = LvalueRef::alloca(bcx, arg_ty, &name);
                 if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
                     // we pass fat pointers as two words, but we want to
                     // represent them internally as a pointer to two words,


@@ -13,7 +13,7 @@
 #![crate_type = "lib"]

 // Hack to get the correct size for the length part in slices
-// CHECK: @helper([[USIZE:i[0-9]+]])
+// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
 #[no_mangle]
 fn helper(_: usize) {
 }
@@ -23,9 +23,9 @@ fn helper(_: usize) {
 pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
     // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
     // check that we copy directly to the return value slot
-    // CHECK: %2 = insertvalue { i8*, [[USIZE]] } undef, i8* %0, 0
-    // CHECK: %3 = insertvalue { i8*, [[USIZE]] } %2, [[USIZE]] %1, 1
-    // CHECK: ret { i8*, [[USIZE]] } %3
+    // CHECK: %0 = insertvalue { i8*, [[USIZE]] } undef, i8* %x.ptr, 0
+    // CHECK: %1 = insertvalue { i8*, [[USIZE]] } %0, [[USIZE]] %x.meta, 1
+    // CHECK: ret { i8*, [[USIZE]] } %1
     { x }
 }


@@ -42,7 +42,6 @@ pub fn align64(i : i32) -> Align64 {
 #[no_mangle]
 pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
     // CHECK: %n64 = alloca %Nested64, align 64
-    // CHECK: %a = alloca %Align64, align 64
     let n64 = Nested64 { a, b, c, d };
     n64
 }
@@ -51,7 +50,6 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
 #[no_mangle]
 pub fn enum64(a: Align64) -> Enum64 {
     // CHECK: %e64 = alloca %Enum64, align 64
-    // CHECK: %a = alloca %Align64, align 64
     let e64 = Enum64::A(a);
     e64
 }


@@ -60,27 +60,27 @@
 #![crate_type = "lib"]

 mod tests {
-    // CHECK: @f1(i32 inreg, i32 inreg, i32)
+    // CHECK: @f1(i32 inreg %arg0, i32 inreg %arg1, i32 %arg2)
     #[no_mangle]
     extern "fastcall" fn f1(_: i32, _: i32, _: i32) {}

-    // CHECK: @f2(i32* inreg, i32* inreg, i32*)
+    // CHECK: @f2(i32* inreg %arg0, i32* inreg %arg1, i32* %arg2)
     #[no_mangle]
     extern "fastcall" fn f2(_: *const i32, _: *const i32, _: *const i32) {}

-    // CHECK: @f3(float, i32 inreg, i32 inreg, i32)
+    // CHECK: @f3(float %arg0, i32 inreg %arg1, i32 inreg %arg2, i32 %arg3)
     #[no_mangle]
     extern "fastcall" fn f3(_: f32, _: i32, _: i32, _: i32) {}

-    // CHECK: @f4(i32 inreg, float, i32 inreg, i32)
+    // CHECK: @f4(i32 inreg %arg0, float %arg1, i32 inreg %arg2, i32 %arg3)
     #[no_mangle]
     extern "fastcall" fn f4(_: i32, _: f32, _: i32, _: i32) {}

-    // CHECK: @f5(i64, i32)
+    // CHECK: @f5(i64 %arg0, i32 %arg1)
     #[no_mangle]
     extern "fastcall" fn f5(_: i64, _: i32) {}

-    // CHECK: @f6(i1 inreg zeroext, i32 inreg, i32)
+    // CHECK: @f6(i1 inreg zeroext %arg0, i32 inreg %arg1, i32 %arg2)
     #[no_mangle]
     extern "fastcall" fn f6(_: bool, _: i32, _: i32) {}
 }


@@ -21,62 +21,62 @@ pub struct UnsafeInner {
     _field: std::cell::UnsafeCell<i16>,
 }

-// CHECK: zeroext i1 @boolean(i1 zeroext)
+// CHECK: zeroext i1 @boolean(i1 zeroext %x)
 #[no_mangle]
 pub fn boolean(x: bool) -> bool {
     x
 }

-// CHECK: @readonly_borrow(i32* noalias readonly dereferenceable(4))
+// CHECK: @readonly_borrow(i32* noalias readonly dereferenceable(4) %arg0)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 pub fn readonly_borrow(_: &i32) {
 }

-// CHECK: @static_borrow(i32* noalias readonly dereferenceable(4))
+// CHECK: @static_borrow(i32* noalias readonly dereferenceable(4) %arg0)
 // static borrow may be captured
 #[no_mangle]
 pub fn static_borrow(_: &'static i32) {
 }

-// CHECK: @named_borrow(i32* noalias readonly dereferenceable(4))
+// CHECK: @named_borrow(i32* noalias readonly dereferenceable(4) %arg0)
 // borrow with named lifetime may be captured
 #[no_mangle]
 pub fn named_borrow<'r>(_: &'r i32) {
 }

-// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2))
+// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_borrow(_: &UnsafeInner) {
 }

-// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2))
+// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
 // ... unless this is a mutable borrow, those never alias
 // ... except that there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
 pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) {
 }

-// CHECK: @mutable_borrow(i32* dereferenceable(4))
+// CHECK: @mutable_borrow(i32* dereferenceable(4) %arg0)
 // FIXME #25759 This should also have `nocapture`
 // ... there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
 pub fn mutable_borrow(_: &mut i32) {
 }

-// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32))
+// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0)
 #[no_mangle]
 pub fn indirect_struct(_: S) {
 }

-// CHECK: @borrowed_struct(%S* noalias readonly dereferenceable(32))
+// CHECK: @borrowed_struct(%S* noalias readonly dereferenceable(32) %arg0)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 pub fn borrowed_struct(_: &S) {
 }

-// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4))
+// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x)
 #[no_mangle]
 pub fn _box(x: Box<i32>) -> Box<i32> {
     x
@@ -91,31 +91,31 @@ pub fn struct_return() -> S {
 }

 // Hack to get the correct size for the length part in slices
-// CHECK: @helper([[USIZE:i[0-9]+]])
+// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
 #[no_mangle]
 fn helper(_: usize) {
 }

-// CHECK: @slice(i8* noalias nonnull readonly, [[USIZE]])
+// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 fn slice(_: &[u8]) {
 }

-// CHECK: @mutable_slice(i8* nonnull, [[USIZE]])
+// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 // ... there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
 fn mutable_slice(_: &mut [u8]) {
 }

-// CHECK: @unsafe_slice(%UnsafeInner* nonnull, [[USIZE]])
+// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_slice(_: &[UnsafeInner]) {
 }

-// CHECK: @str(i8* noalias nonnull readonly, [[USIZE]])
+// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 fn str(_: &[u8]) {
@@ -132,7 +132,7 @@ fn trait_borrow(_: &Drop) {
 fn trait_box(_: Box<Drop>) {
 }

-// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly, [[USIZE]])
+// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
 #[no_mangle]
 fn return_slice(x: &[u16]) -> &[u16] {
     x


@@ -24,6 +24,6 @@ pub struct Big {
 // CHECK-LABEL: @test_mvi
 #[no_mangle]
 pub unsafe fn test_mvi(target: *mut Big, make_big: fn() -> Big) {
-    // CHECK: call void %1(%Big*{{[^%]*}} %0)
+    // CHECK: call void %make_big(%Big*{{[^%]*}} %target)
     move_val_init(target, make_big());
 }


@@ -13,7 +13,7 @@
 #![crate_type = "lib"]

 // Hack to get the correct size for the length part in slices
-// CHECK: @helper([[USIZE:i[0-9]+]])
+// CHECK: @helper([[USIZE:i[0-9]+]] %arg0)
 #[no_mangle]
 fn helper(_: usize) {
 }
@@ -24,9 +24,9 @@ pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
     // CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0
-    // CHECK: store i8* %0, i8** [[X0]]
+    // CHECK: store i8* %s.ptr, i8** [[X0]]
     // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1
-    // CHECK: store [[USIZE]] %1, [[USIZE]]* [[X1]]
+    // CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]]

     let x = &*s;
     &x; // keep variable in an alloca


@@ -25,9 +25,9 @@ pub struct Bytes {
 #[no_mangle]
 pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
     // CHECK: [[TMP:%.+]] = alloca i32
-    // CHECK: %arg1 = alloca [4 x i8]
-    // CHECK: store i32 %1, i32* [[TMP]]
-    // CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8*
+    // CHECK: %y = alloca [4 x i8]
+    // CHECK: store i32 %0, i32* [[TMP]]
+    // CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %y to i8*
     // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
     // CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
     *x = y;
@@ -39,9 +39,9 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
 #[no_mangle]
 pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
     // CHECK: [[TMP:%.+]] = alloca i32
-    // CHECK: %arg1 = alloca %Bytes
-    // CHECK: store i32 %1, i32* [[TMP]]
-    // CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8*
+    // CHECK: %y = alloca %Bytes
+    // CHECK: store i32 %0, i32* [[TMP]]
+    // CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %y to i8*
     // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
     // CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
     *x = y;


@@ -19,13 +19,12 @@ fn main() { }
 // START rustc.node4.CopyPropagation.before.mir
 // bb0: {
 //     ...
-//     _2 = _1;
+//     _3 = _1;
+//     ...
+//     _2 = _3;
 //     ...
 //     _4 = _2;
-//     _3 = _4;
-//     ...
-//     _5 = _3;
-//     _0 = _5;
+//     _0 = _4;
 //     ...
 //     return;
 // }


@@ -26,8 +26,7 @@ fn main() {}
 //     ...
 //     _2 = _1;
 //     ...
-//     _3 = _2;
-//     _0 = Baz { x: _3, y: const 0f32, z: const false };
+//     _0 = Baz { x: _2, y: const 0f32, z: const false };
 //     ...
 //     return;
 // }
@@ -37,8 +36,7 @@ fn main() {}
 //     ...
 //     _2 = _1;
 //     ...
-//     _3 = _2;
-//     (_0.0: usize) = _3;
+//     (_0.0: usize) = _2;
 //     (_0.1: f32) = const 0f32;
 //     (_0.2: bool) = const false;
 //     ...


@@ -30,10 +30,7 @@ fn main() {
 // bb0: {
 //     StorageLive(_2);
 //     _2 = _1;
-//     StorageLive(_3);
-//     _3 = _2;
-//     _0 = Baz::Foo { x: _3 };
-//     StorageDead(_3);
+//     _0 = Baz::Foo { x: _2 };
 //     StorageDead(_2);
 //     return;
 // }
@@ -42,11 +39,8 @@ fn main() {
 // bb0: {
 //     StorageLive(_2);
 //     _2 = _1;
-//     StorageLive(_3);
-//     _3 = _2;
-//     ((_0 as Foo).0: usize) = _3;
+//     ((_0 as Foo).0: usize) = _2;
 //     discriminant(_0) = 1;
-//     StorageDead(_3);
 //     StorageDead(_2);
 //     return;
 // }


@@ -28,35 +28,35 @@ fn main() {}
 // END RUST SOURCE
 // START rustc.node12.Deaggregator.before.mir
 // bb1: {
-//     StorageLive(_6);
-//     _6 = _4;
-//     _0 = Foo::A(_6,);
-//     StorageDead(_6);
+//     StorageLive(_4);
+//     _4 = _2;
+//     _0 = Foo::A(_4,);
+//     StorageDead(_4);
 //     goto -> bb3;
 // }
 // bb2: {
-//     StorageLive(_7);
-//     _7 = _4;
-//     _0 = Foo::B(_7,);
-//     StorageDead(_7);
+//     StorageLive(_5);
+//     _5 = _2;
+//     _0 = Foo::B(_5,);
+//     StorageDead(_5);
 //     goto -> bb3;
 // }
 // END rustc.node12.Deaggregator.before.mir
 // START rustc.node12.Deaggregator.after.mir
 // bb1: {
-//     StorageLive(_6);
-//     _6 = _4;
-//     ((_0 as A).0: i32) = _6;
+//     StorageLive(_4);
+//     _4 = _2;
+//     ((_0 as A).0: i32) = _4;
 //     discriminant(_0) = 0;
-//     StorageDead(_6);
+//     StorageDead(_4);
 //     goto -> bb3;
 // }
 // bb2: {
-//     StorageLive(_7);
-//     _7 = _4;
-//     ((_0 as B).0: i32) = _7;
+//     StorageLive(_5);
+//     _5 = _2;
+//     ((_0 as B).0: i32) = _5;
 //     discriminant(_0) = 1;
-//     StorageDead(_7);
+//     StorageDead(_5);
 //     goto -> bb3;
 // }
 // END rustc.node12.Deaggregator.after.mir


@@ -25,15 +25,14 @@ fn main() { }
 // START rustc.node10.Deaggregator.before.mir
 // bb0: {
 //     ...
-//     _2 = _1;
+//     _3 = _1;
 //     ...
-//     _4 = _2;
-//     _3 = Foo::A(_4,);
+//     _2 = Foo::A(_3,);
 //     ...
-//     _6 = _2;
-//     _5 = Foo::A(_6,);
+//     _5 = _1;
+//     _4 = Foo::A(_5,);
 //     ...
-//     _0 = [_3, _5];
+//     _0 = [_2, _4];
 //     ...
 //     return;
 // }
@@ -41,17 +40,16 @@ fn main() { }
 // START rustc.node10.Deaggregator.after.mir
 // bb0: {
 //     ...
-//     _2 = _1;
+//     _3 = _1;
 //     ...
-//     _4 = _2;
-//     ((_3 as A).0: i32) = _4;
-//     discriminant(_3) = 0;
+//     ((_2 as A).0: i32) = _3;
+//     discriminant(_2) = 0;
 //     ...
-//     _6 = _2;
-//     ((_5 as A).0: i32) = _6;
-//     discriminant(_5) = 0;
+//     _5 = _1;
+//     ((_4 as A).0: i32) = _5;
+//     discriminant(_4) = 0;
 //     ...
-//     _0 = [_3, _5];
+//     _0 = [_2, _4];
 //     ...
 //     return;
 // }


@@ -64,17 +64,14 @@ fn main() {
 // bb0: {
 //     Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:11) => validate_1[317d]::main[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(50)], _2: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:11) => validate_1[317d]::main[0]::{{closure}}[0] }, BrAnon(1)) mut i32]);
 //     StorageLive(_3);
-//     _3 = _2;
+//     Validate(Suspend(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 }))), [(*_2): i32]);
+//     _3 = &ReErased (*_2);
+//     Validate(Acquire, [(*_3): i32/ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })) (imm)]);
 //     StorageLive(_4);
-//     Validate(Suspend(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 }))), [(*_3): i32]);
-//     _4 = &ReErased (*_3);
-//     Validate(Acquire, [(*_4): i32/ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })) (imm)]);
-//     StorageLive(_5);
-//     _5 = (*_4);
-//     _0 = _5;
-//     StorageDead(_5);
-//     EndRegion(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })));
+//     _4 = (*_3);
+//     _0 = _4;
 //     StorageDead(_4);
+//     EndRegion(ReScope(Remainder(BlockRemainder { block: ItemLocalId(22), first_statement_index: 0 })));
 //     StorageDead(_3);
 //     return;
 // }


@@ -53,10 +53,7 @@ fn main() {
 // bb0: {
 //     Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_4[317d]::write_42[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(22)], _2: *mut i32]);
 //     Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_4[317d]::write_42[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(22)], _2: *mut i32]);
-//     StorageLive(_3);
-//     _3 = _2;
-//     (*_3) = const 23i32;
-//     StorageDead(_3);
+//     (*_2) = const 23i32;
 //     return;
 // }
 // }
@@ -68,11 +65,11 @@ fn main() {
 //     Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(0:4) => validate_4[317d]::test[0] }, BrAnon(0)) mut i32]);
 //     Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(0:4) => validate_4[317d]::test[0] }, BrAnon(0)) mut i32]);
 //     ...
-//     _3 = const write_42(_4) -> bb1;
+//     _2 = const write_42(_3) -> bb1;
 // }
 // bb1: {
-//     Validate(Acquire, [_3: bool]);
-//     Validate(Release, [_3: bool]);
+//     Validate(Acquire, [_2: bool]);
+//     Validate(Release, [_2: bool]);
 //     ...
 // }
 // }
@@ -85,7 +82,7 @@ fn main() {
 //     Validate(Release, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:10) => validate_4[317d]::main[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(60)], _2: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:10) => validate_4[317d]::main[0]::{{closure}}[0] }, BrAnon(1)) mut i32]);
 //     StorageLive(_3);
 //     ...
-//     _0 = const write_42(_4) -> bb1;
+//     _0 = const write_42(_3) -> bb1;
 // }
 // ...
 // }


@@ -39,8 +39,8 @@ fn main() {
 // bb0: {
 //     Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(0:4) => validate_5[317d]::test[0] }, BrAnon(0)) mut i32]);
 //     ...
-//     Validate(Release, [_3: bool, _4: *mut i32]);
-//     _3 = const write_42(_4) -> bb1;
+//     Validate(Release, [_2: bool, _3: *mut i32]);
+//     _2 = const write_42(_3) -> bb1;
 // }
 // ...
 // }
@@ -51,17 +51,15 @@ fn main() {
 // bb0: {
 //     Validate(Acquire, [_1: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_5[317d]::main[0]::{{closure}}[0] }, BrEnv) [closure@NodeId(46)], _2: &ReFree(DefId { krate: CrateNum(0), index: DefIndex(1:9) => validate_5[317d]::main[0]::{{closure}}[0] }, BrAnon(1)) mut i32]);
 //     StorageLive(_3);
-//     _3 = _2;
 //     StorageLive(_4);
-//     StorageLive(_5);
-//     Validate(Suspend(ReScope(Node(ItemLocalId(9)))), [(*_3): i32]);
-//     _5 = &ReErased mut (*_3);
-//     Validate(Acquire, [(*_5): i32/ReScope(Node(ItemLocalId(9)))]);
-//     _4 = _5 as *mut i32 (Misc);
+//     Validate(Suspend(ReScope(Node(ItemLocalId(9)))), [(*_2): i32]);
+//     _4 = &ReErased mut (*_2);
+//     Validate(Acquire, [(*_4): i32/ReScope(Node(ItemLocalId(9)))]);
+//     _3 = _4 as *mut i32 (Misc);
 //     EndRegion(ReScope(Node(ItemLocalId(9))));
-//     StorageDead(_5);
-//     Validate(Release, [_0: bool, _4: *mut i32]);
-//     _0 = const write_42(_4) -> bb1;
+//     StorageDead(_4);
+//     Validate(Release, [_0: bool, _3: *mut i32]);
+//     _0 = const write_42(_3) -> bb1;
 // }
 // ...
 // }