1
Fork 0

Fix transmute::<T, U> where T requires a bigger alignment than U

For transmute::<T, U> we simply pointercast the destination from a U
pointer to a T pointer, without providing any alignment information,
thus LLVM assumes that the destination is aligned to hold a value of
type T, which is not necessarily true. This can lead to LLVM emitting
machine instructions that assume said alignment, and thus cause aborts.

To fix this, we need to provide the actual alignment to store_operand()
and in turn to store() so they can set the proper alignment information
on the stores and LLVM can emit the proper machine instructions.

Fixes #32947
This commit is contained in:
Björn Steinbrink 2016-12-29 02:20:26 +01:00
parent 82801b552e
commit 71a11a0b10
11 changed files with 97 additions and 65 deletions

View file

@ -248,11 +248,8 @@ impl ArgType {
let can_store_through_cast_ptr = false; let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr { if can_store_through_cast_ptr {
let cast_dst = bcx.pointercast(dst, ty.ptr_to()); let cast_dst = bcx.pointercast(dst, ty.ptr_to());
let store = bcx.store(val, cast_dst);
let llalign = llalign_of_min(ccx, self.ty); let llalign = llalign_of_min(ccx, self.ty);
unsafe { bcx.store(val, cast_dst, Some(llalign));
llvm::LLVMSetAlignment(store, llalign);
}
} else { } else {
// The actual return type is a struct, but the ABI // The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The // adaptation code has cast it into some scalar type. The
@ -273,7 +270,7 @@ impl ArgType {
base::Lifetime::Start.call(bcx, llscratch); base::Lifetime::Start.call(bcx, llscratch);
// ...where we first store the value... // ...where we first store the value...
bcx.store(val, llscratch); bcx.store(val, llscratch, None);
// ...and then memcpy it to the intended destination. // ...and then memcpy it to the intended destination.
base::call_memcpy(bcx, base::call_memcpy(bcx,
@ -289,7 +286,7 @@ impl ArgType {
if self.original_ty == Type::i1(ccx) { if self.original_ty == Type::i1(ccx) {
val = bcx.zext(val, Type::i8(ccx)); val = bcx.zext(val, Type::i8(ccx));
} }
bcx.store(val, dst); bcx.store(val, dst, None);
} }
} }

View file

@ -443,11 +443,11 @@ pub fn trans_set_discr<'a, 'tcx>(
layout::CEnum{ discr, min, max, .. } => { layout::CEnum{ discr, min, max, .. } => {
assert_discr_in_range(Disr(min), Disr(max), to); assert_discr_in_range(Disr(min), Disr(max), to);
bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true), bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
val); val, None);
} }
layout::General{ discr, .. } => { layout::General{ discr, .. } => {
bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true), bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
bcx.struct_gep(val, 0)); bcx.struct_gep(val, 0), None);
} }
layout::Univariant { .. } layout::Univariant { .. }
| layout::UntaggedUnion { .. } | layout::UntaggedUnion { .. }
@ -458,7 +458,7 @@ pub fn trans_set_discr<'a, 'tcx>(
let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
if to.0 != nndiscr { if to.0 != nndiscr {
let llptrty = type_of::sizing_type_of(bcx.ccx, nnty); let llptrty = type_of::sizing_type_of(bcx.ccx, nnty);
bcx.store(C_null(llptrty), val); bcx.store(C_null(llptrty), val, None);
} }
} }
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
@ -476,7 +476,7 @@ pub fn trans_set_discr<'a, 'tcx>(
let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>(); let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
let llptrptr = bcx.gepi(val, &path[..]); let llptrptr = bcx.gepi(val, &path[..]);
let llptrty = val_ty(llptrptr).element_type(); let llptrty = val_ty(llptrptr).element_type();
bcx.store(C_null(llptrty), llptrptr); bcx.store(C_null(llptrty), llptrptr, None);
} }
} }
} }

View file

@ -105,7 +105,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &(val, _))) in outputs.enumerate() { for (i, (_, &(val, _))) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) }; let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
bcx.store(v, val); bcx.store(v, val, None);
} }
// Store expn_id in a metadata node so we can map LLVM errors // Store expn_id in a metadata node so we can map LLVM errors

View file

@ -290,7 +290,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i); let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i);
let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i); let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i);
if src_fty == dst_fty { if src_fty == dst_fty {
memcpy_ty(bcx, dst_f, src_f, src_fty); memcpy_ty(bcx, dst_f, src_f, src_fty, None);
} else { } else {
coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty); coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
} }
@ -429,7 +429,7 @@ pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: Valu
let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA); let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
store_fat_ptr(cx, lladdr, llextra, dst, t); store_fat_ptr(cx, lladdr, llextra, dst, t);
} else { } else {
cx.store(from_immediate(cx, v), dst); cx.store(from_immediate(cx, v), dst, None);
} }
} }
@ -439,8 +439,8 @@ pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
dst: ValueRef, dst: ValueRef,
_ty: Ty<'tcx>) { _ty: Ty<'tcx>) {
// FIXME: emit metadata // FIXME: emit metadata
cx.store(data, get_dataptr(cx, dst)); cx.store(data, get_dataptr(cx, dst), None);
cx.store(extra, get_meta(cx, dst)); cx.store(extra, get_meta(cx, dst), None);
} }
pub fn load_fat_ptr<'a, 'tcx>( pub fn load_fat_ptr<'a, 'tcx>(
@ -523,26 +523,21 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>,
b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
} }
pub fn memcpy_ty<'a, 'tcx>( pub fn memcpy_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
bcx: &BlockAndBuilder<'a, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx> dst: ValueRef,
) { src: ValueRef,
t: Ty<'tcx>,
align: Option<u32>) {
let ccx = bcx.ccx; let ccx = bcx.ccx;
if type_is_zero_size(ccx, t) { if type_is_zero_size(ccx, t) {
return; return;
} }
if t.is_structural() {
let llty = type_of::type_of(ccx, t); let llty = type_of::type_of(ccx, t);
let llsz = llsize_of(ccx, llty); let llsz = llsize_of(ccx, llty);
let llalign = type_of::align_of(ccx, t); let llalign = align.unwrap_or_else(|| type_of::align_of(ccx, t));
call_memcpy(bcx, dst, src, llsz, llalign as u32); call_memcpy(bcx, dst, src, llsz, llalign as u32);
} else if common::type_is_fat_ptr(bcx.ccx, t) {
let (data, extra) = load_fat_ptr(bcx, src, t);
store_fat_ptr(bcx, data, extra, dst, t);
} else {
store_ty(bcx, load_ty(bcx, src, t), dst, t);
}
} }
pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,

View file

@ -512,13 +512,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
value value
} }
pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<u32>) -> ValueRef {
debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
assert!(!self.llbuilder.is_null()); assert!(!self.llbuilder.is_null());
self.count_insn("store"); self.count_insn("store");
let ptr = self.check_store(val, ptr); let ptr = self.check_store(val, ptr);
unsafe { unsafe {
llvm::LLVMBuildStore(self.llbuilder, val, ptr) let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
if let Some(align) = align {
llvm::LLVMSetAlignment(store, align as c_uint);
}
store
} }
} }

View file

@ -288,8 +288,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
let val = bcx.call(llfn, &[llargs[0], llargs[1]], None); let val = bcx.call(llfn, &[llargs[0], llargs[1]], None);
let result = bcx.extract_value(val, 0); let result = bcx.extract_value(val, 0);
let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx)); let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx));
bcx.store(result, bcx.struct_gep(llresult, 0)); bcx.store(result, bcx.struct_gep(llresult, 0), None);
bcx.store(overflow, bcx.struct_gep(llresult, 1)); bcx.store(overflow, bcx.struct_gep(llresult, 1), None);
C_nil(bcx.ccx) C_nil(bcx.ccx)
}, },
@ -407,8 +407,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
failorder, weak); failorder, weak);
let result = bcx.extract_value(val, 0); let result = bcx.extract_value(val, 0);
let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx)); let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx));
bcx.store(result, bcx.struct_gep(llresult, 0)); bcx.store(result, bcx.struct_gep(llresult, 0), None);
bcx.store(success, bcx.struct_gep(llresult, 1)); bcx.store(success, bcx.struct_gep(llresult, 1), None);
} else { } else {
invalid_monomorphization(sty); invalid_monomorphization(sty);
} }
@ -613,7 +613,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
for i in 0..elems.len() { for i in 0..elems.len() {
let val = bcx.extract_value(val, i); let val = bcx.extract_value(val, i);
bcx.store(val, bcx.struct_gep(llresult, i)); bcx.store(val, bcx.struct_gep(llresult, i), None);
} }
C_nil(ccx) C_nil(ccx)
} }
@ -625,10 +625,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
if let Some(ty) = fn_ty.ret.cast { if let Some(ty) = fn_ty.ret.cast {
let ptr = bcx.pointercast(llresult, ty.ptr_to()); let ptr = bcx.pointercast(llresult, ty.ptr_to());
let store = bcx.store(llval, ptr); bcx.store(llval, ptr, Some(type_of::align_of(ccx, ret_ty)));
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
}
} else { } else {
store_ty(bcx, llval, llresult, ret_ty); store_ty(bcx, llval, llresult, ret_ty);
} }
@ -695,7 +692,7 @@ fn try_intrinsic<'a, 'tcx>(
) { ) {
if bcx.sess().no_landing_pads() { if bcx.sess().no_landing_pads() {
bcx.call(func, &[data], None); bcx.call(func, &[data], None);
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest); bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
} else if wants_msvc_seh(bcx.sess()) { } else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, func, data, local_ptr, dest); trans_msvc_try(bcx, func, data, local_ptr, dest);
} else { } else {
@ -789,8 +786,8 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
let val1 = C_i32(ccx, 1); let val1 = C_i32(ccx, 1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1])); let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]));
let local_ptr = catchpad.bitcast(local_ptr, i64p); let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr); catchpad.store(arg1, local_ptr, None);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1])); catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);
catchpad.catch_ret(tok, caught.llbb()); catchpad.catch_ret(tok, caught.llbb());
caught.ret(C_i32(ccx, 1)); caught.ret(C_i32(ccx, 1));
@ -799,7 +796,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = bcx.call(llfn, &[func, data, local_ptr], None); let ret = bcx.call(llfn, &[func, data, local_ptr], None);
bcx.store(ret, dest); bcx.store(ret, dest, None);
} }
// Definition of the standard "try" function for Rust using the GNU-like model // Definition of the standard "try" function for Rust using the GNU-like model
@ -858,14 +855,14 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn); let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn);
catch.add_clause(vals, C_null(Type::i8p(ccx))); catch.add_clause(vals, C_null(Type::i8p(ccx)));
let ptr = catch.extract_value(vals, 0); let ptr = catch.extract_value(vals, 0);
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to())); catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
catch.ret(C_i32(ccx, 1)); catch.ret(C_i32(ccx, 1));
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = bcx.call(llfn, &[func, data, local_ptr], None); let ret = bcx.call(llfn, &[func, data, local_ptr], None);
bcx.store(ret, dest); bcx.store(ret, dest, None);
} }
// Helper function to give a Block to a closure to translate a shim function. // Helper function to give a Block to a closure to translate a shim function.

View file

@ -23,7 +23,7 @@ use consts;
use Disr; use Disr;
use machine::{llalign_of_min, llbitsize_of_real}; use machine::{llalign_of_min, llbitsize_of_real};
use meth; use meth;
use type_of; use type_of::{self, align_of};
use glue; use glue;
use type_::Type; use type_::Type;
@ -31,6 +31,8 @@ use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashMap;
use syntax::symbol::Symbol; use syntax::symbol::Symbol;
use std::cmp;
use super::{MirContext, LocalRef}; use super::{MirContext, LocalRef};
use super::analyze::CleanupKind; use super::analyze::CleanupKind;
use super::constant::Const; use super::constant::Const;
@ -207,7 +209,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let llslot = match op.val { let llslot = match op.val {
Immediate(_) | Pair(..) => { Immediate(_) | Pair(..) => {
let llscratch = bcx.fcx().alloca(ret.original_ty, "ret"); let llscratch = bcx.fcx().alloca(ret.original_ty, "ret");
self.store_operand(&bcx, llscratch, op); self.store_operand(&bcx, llscratch, op, None);
llscratch llscratch
} }
Ref(llval) => llval Ref(llval) => llval
@ -426,7 +428,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// The first argument is a thin destination pointer. // The first argument is a thin destination pointer.
let llptr = self.trans_operand(&bcx, &args[0]).immediate(); let llptr = self.trans_operand(&bcx, &args[0]).immediate();
let val = self.trans_operand(&bcx, &args[1]); let val = self.trans_operand(&bcx, &args[1]);
self.store_operand(&bcx, llptr, val); self.store_operand(&bcx, llptr, val, None);
funclet_br(self, bcx, target); funclet_br(self, bcx, target);
return; return;
} }
@ -659,7 +661,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
Immediate(_) | Pair(..) => { Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() { if arg.is_indirect() || arg.cast.is_some() {
let llscratch = bcx.fcx().alloca(arg.original_ty, "arg"); let llscratch = bcx.fcx().alloca(arg.original_ty, "arg");
self.store_operand(bcx, llscratch, op); self.store_operand(bcx, llscratch, op, None);
(llscratch, true) (llscratch, true)
} else { } else {
(op.pack_if_pair(bcx).immediate(), false) (op.pack_if_pair(bcx).immediate(), false)
@ -801,7 +803,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
bcx.set_cleanup(llretval); bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx); let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot); bcx.store(llretval, slot, None);
bcx.br(target.llbb()); bcx.br(target.llbb());
bcx.llbb() bcx.llbb()
} }
@ -886,7 +888,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let llty = type_of::type_of(bcx.ccx, val.ty); let llty = type_of::type_of(bcx.ccx, val.ty);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
self.store_operand(bcx, cast_ptr, val); let in_type = val.ty;
let out_type = dst.ty.to_ty(bcx.tcx());
let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type));
self.store_operand(bcx, cast_ptr, val, Some(llalign));
} }

View file

@ -514,7 +514,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
// environment into its components so it ends up out of bounds. // environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref { let env_ptr = if !env_ref {
let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr"); let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr");
bcx.store(llval, alloc); bcx.store(llval, alloc, None);
alloc alloc
} else { } else {
llval llval

View file

@ -244,21 +244,24 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn store_operand(&mut self, pub fn store_operand(&mut self,
bcx: &BlockAndBuilder<'a, 'tcx>, bcx: &BlockAndBuilder<'a, 'tcx>,
lldest: ValueRef, lldest: ValueRef,
operand: OperandRef<'tcx>) { operand: OperandRef<'tcx>,
debug!("store_operand: operand={:?}", operand); align: Option<u32>) {
debug!("store_operand: operand={:?}, align={:?}", operand, align);
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless. // value is through `undef`, and store itself is useless.
if common::type_is_zero_size(bcx.ccx, operand.ty) { if common::type_is_zero_size(bcx.ccx, operand.ty) {
return; return;
} }
match operand.val { match operand.val {
OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty), OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty, align),
OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty), OperandValue::Immediate(s) => {
bcx.store(base::from_immediate(bcx, s), lldest, align);
}
OperandValue::Pair(a, b) => { OperandValue::Pair(a, b) => {
let a = base::from_immediate(bcx, a); let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b); let b = base::from_immediate(bcx, b);
bcx.store(a, bcx.struct_gep(lldest, 0)); bcx.store(a, bcx.struct_gep(lldest, 0), align);
bcx.store(b, bcx.struct_gep(lldest, 1)); bcx.store(b, bcx.struct_gep(lldest, 1), align);
} }
} }
} }

View file

@ -48,7 +48,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let tr_operand = self.trans_operand(&bcx, operand); let tr_operand = self.trans_operand(&bcx, operand);
// FIXME: consider not copying constants through stack. (fixable by translating // FIXME: consider not copying constants through stack. (fixable by translating
// constants into OperandValue::Ref, why dont we do that yet if we dont?) // constants into OperandValue::Ref, why dont we do that yet if we dont?)
self.store_operand(&bcx, dest.llval, tr_operand); self.store_operand(&bcx, dest.llval, tr_operand, None);
bcx bcx
} }
@ -59,7 +59,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
// into-coerce of a thin pointer to a fat pointer - just // into-coerce of a thin pointer to a fat pointer - just
// use the operand path. // use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp); self.store_operand(&bcx, dest.llval, temp, None);
return bcx; return bcx;
} }
@ -95,7 +95,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let size = C_uint(bcx.ccx, size); let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval); let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
self.store_operand(bcx, llslot, tr_elem); self.store_operand(bcx, llslot, tr_elem, None);
}) })
} }
@ -113,7 +113,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let field_index = active_field_index.unwrap_or(i); let field_index = active_field_index.unwrap_or(i);
let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr, let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr,
field_index); field_index);
self.store_operand(&bcx, lldest_i, op); self.store_operand(&bcx, lldest_i, op, None);
} }
} }
}, },
@ -138,7 +138,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
i i
}; };
let dest = bcx.gepi(dest.llval, &[0, i]); let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, op); self.store_operand(&bcx, dest, op, None);
} }
} }
} }
@ -163,7 +163,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
_ => { _ => {
assert!(rvalue_creates_operand(rvalue)); assert!(rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp); self.store_operand(&bcx, dest.llval, temp, None);
bcx bcx
} }
} }

View file

@ -0,0 +1,31 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(repr_simd, test)]
extern crate test;
#[repr(simd)]
pub struct Mu64(pub u64, pub u64, pub u64, pub u64);
fn main() {
// This ensures an unaligned pointer even in optimized builds, though LLVM
// gets enough type information to actually not mess things up in that case,
// but at the time of writing this, it's enough to trigger the bug in
// non-optimized builds
unsafe {
let memory = &mut [0u64; 8] as *mut _ as *mut u8;
let misaligned_ptr: &mut [u8; 32] = {
std::mem::transmute(memory.offset(1))
};
*misaligned_ptr = std::mem::transmute(Mu64(1, 1, 1, 1));
test::black_box(memory);
}
}