rustc_trans: compute better align/dereferenceable attributes from pointees.
commit f8d5d0c30c (parent ced5e04e8b)
5 changed files with 242 additions and 128 deletions
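The user-visible effect is easiest to see in the codegen tests updated below: indirect arguments, by-reference arguments and `sret` return slots now carry an `align N` attribute next to `dereferenceable(N)`. A minimal sketch drawn from those tests (the `%S`, `align 4` and `dereferenceable(32)` values are the ones expected for the test's 32-byte, 4-byte-aligned struct `S`; they vary with the actual type and target):

pub struct S {
    _field: [i32; 8],
}

// Before: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0)
// After:  @indirect_struct(%S* noalias nocapture align 4 dereferenceable(32) %arg0)
#[no_mangle]
pub fn indirect_struct(_: S) {
}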
@@ -730,7 +730,9 @@ extern "C" {
                                        FunctionTy: TypeRef)
                                        -> ValueRef;
     pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint);
+    pub fn LLVMRustAddAlignmentAttr(Fn: ValueRef, index: c_uint, bytes: u32);
     pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64);
+    pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: ValueRef, index: c_uint, bytes: u64);
     pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute);
     pub fn LLVMRustAddFunctionAttrStringValue(Fn: ValueRef,
                                               index: c_uint,
@@ -760,7 +762,11 @@ extern "C" {
     // Operations on call sites
     pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint);
     pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute);
+    pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u32);
     pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64);
+    pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: ValueRef,
+                                                        index: c_uint,
+                                                        bytes: u64);
 
     // Operations on load/store instructions (only)
     pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool);
@@ -96,20 +96,24 @@ impl ArgAttribute {
 
 /// A compact representation of LLVM attributes (at least those relevant for this module)
 /// that can be manipulated without interacting with LLVM's Attribute machinery.
-#[derive(Copy, Clone, Debug, Default)]
+#[derive(Copy, Clone, Debug)]
 pub struct ArgAttributes {
     regular: ArgAttribute,
-    dereferenceable_bytes: u64,
+    pointee_size: Size,
+    pointee_align: Option<Align>
 }
 
 impl ArgAttributes {
-    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
-        self.regular = self.regular | attr;
-        self
+    fn new() -> Self {
+        ArgAttributes {
+            regular: ArgAttribute::default(),
+            pointee_size: Size::from_bytes(0),
+            pointee_align: None,
+        }
     }
 
-    pub fn set_dereferenceable(&mut self, size: Size) -> &mut Self {
-        self.dereferenceable_bytes = size.bytes();
+    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
+        self.regular = self.regular | attr;
         self
     }
 
@@ -118,24 +122,52 @@ impl ArgAttributes {
     }
 
     pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
+        let mut regular = self.regular;
         unsafe {
-            self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
-            if self.dereferenceable_bytes != 0 {
-                llvm::LLVMRustAddDereferenceableAttr(llfn,
-                                                     idx.as_uint(),
-                                                     self.dereferenceable_bytes);
+            let deref = self.pointee_size.bytes();
+            if deref != 0 {
+                if regular.contains(ArgAttribute::NonNull) {
+                    llvm::LLVMRustAddDereferenceableAttr(llfn,
+                                                         idx.as_uint(),
+                                                         deref);
+                } else {
+                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
+                                                               idx.as_uint(),
+                                                               deref);
+                }
+                regular -= ArgAttribute::NonNull;
             }
+            if let Some(align) = self.pointee_align {
+                llvm::LLVMRustAddAlignmentAttr(llfn,
+                                               idx.as_uint(),
+                                               align.abi() as u32);
+            }
+            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
         }
     }
 
     pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
+        let mut regular = self.regular;
         unsafe {
-            self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
-            if self.dereferenceable_bytes != 0 {
-                llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
-                                                             idx.as_uint(),
-                                                             self.dereferenceable_bytes);
+            let deref = self.pointee_size.bytes();
+            if deref != 0 {
+                if regular.contains(ArgAttribute::NonNull) {
+                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
+                                                                 idx.as_uint(),
+                                                                 deref);
+                } else {
+                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
+                                                                       idx.as_uint(),
+                                                                       deref);
+                }
+                regular -= ArgAttribute::NonNull;
             }
+            if let Some(align) = self.pointee_align {
+                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
+                                                       idx.as_uint(),
+                                                       align.abi() as u32);
+            }
+            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
         }
     }
 }
@@ -439,12 +471,20 @@ pub struct ArgType<'tcx> {
 
 impl<'a, 'tcx> ArgType<'tcx> {
     fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
+        let mut attrs = ArgAttributes::new();
+
+        if let layout::Abi::Scalar(ref scalar) = layout.abi {
+            if scalar.is_bool() {
+                attrs.set(ArgAttribute::ZExt);
+            }
+        }
+
         ArgType {
             kind: ArgKind::Direct,
             layout,
             cast: None,
             pad: None,
-            attrs: ArgAttributes::default(),
+            attrs,
             nested: vec![]
         }
     }
@@ -454,14 +494,16 @@ impl<'a, 'tcx> ArgType<'tcx> {
         assert_eq!(self.kind, ArgKind::Direct);
 
         // Wipe old attributes, likely not valid through indirection.
-        self.attrs = ArgAttributes::default();
+        self.attrs = ArgAttributes::new();
 
         // For non-immediate arguments the callee gets its own copy of
         // the value on the stack, so there are no aliases. It's also
        // program-invisible so can't possibly capture
         self.attrs.set(ArgAttribute::NoAlias)
                   .set(ArgAttribute::NoCapture)
-                  .set_dereferenceable(self.layout.size);
+                  .set(ArgAttribute::NonNull);
+        self.attrs.pointee_size = self.layout.size;
+        self.attrs.pointee_align = Some(self.layout.align);
 
         self.kind = ArgKind::Indirect;
     }
@@ -472,6 +514,22 @@ impl<'a, 'tcx> ArgType<'tcx> {
         self.kind = ArgKind::Ignore;
     }
 
+    fn safe_pointee(&mut self, layout: TyLayout) {
+        match self.layout.abi {
+            layout::Abi::Scalar(layout::Scalar {
+                value: layout::Pointer,
+                ref valid_range
+            }) => {
+                if valid_range.start > 0 {
+                    self.attrs.set(ArgAttribute::NonNull);
+                }
+                self.attrs.pointee_size = layout.size;
+                self.attrs.pointee_align = Some(layout.align);
+            }
+            _ => bug!("ArgType::safe_pointee({:#?}): not a pointer", self.layout)
+        }
+    }
+
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
         if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
@@ -694,13 +752,85 @@ impl<'a, 'tcx> FnType<'tcx> {
             _ => false
         };
 
+        // Handle safe Rust thin and fat pointers.
+        let adjust_for_rust_type = |arg: &mut ArgType<'tcx>, is_return: bool| {
+            // We only handle thin pointers here.
+            match arg.layout.abi {
+                layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {}
+                _ => return
+            }
+
+            let mut ty = arg.layout.ty;
+
+            // FIXME(eddyb) detect more nested cases than `Option<&T>` here.
+            match arg.layout.variants {
+                layout::Variants::NicheFilling { dataful_variant, .. } => {
+                    let variant = arg.layout.for_variant(ccx, dataful_variant);
+                    for i in 0..variant.fields.count() {
+                        let field = variant.field(ccx, i);
+                        match field.abi {
+                            layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {
+                                // We found the pointer field, use its type.
+                                ty = field.ty;
+                                break;
+                            }
+                            _ => {}
+                        }
+                    }
+                }
+                _ => {}
+            }
+
+            match ty.sty {
+                // `Box` pointer parameters never alias because ownership is transferred
+                ty::TyAdt(def, _) if def.is_box() => {
+                    arg.attrs.set(ArgAttribute::NoAlias);
+
+                    arg.safe_pointee(ccx.layout_of(ty.boxed_ty()));
+                }
+
+                ty::TyRef(_, mt) => {
+                    // `&mut` pointer parameters never alias other parameters,
+                    // or mutable global data
+                    //
+                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
+                    // and can be marked as both `readonly` and `noalias`, as
+                    // LLVM's definition of `noalias` is based solely on memory
+                    // dependencies rather than pointer equality
+                    let is_freeze = ccx.shared().type_is_freeze(mt.ty);
+
+                    let no_alias_is_safe =
+                        if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias ||
+                           ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort {
+                            // Mutable references or immutable shared references
+                            mt.mutbl == hir::MutMutable || is_freeze
+                        } else {
+                            // Only immutable shared references
+                            mt.mutbl != hir::MutMutable && is_freeze
+                        };
+
+                    if no_alias_is_safe {
+                        arg.attrs.set(ArgAttribute::NoAlias);
+                    }
+
+                    if mt.mutbl == hir::MutImmutable && is_freeze && !is_return {
+                        arg.attrs.set(ArgAttribute::ReadOnly);
+                    }
+
+                    arg.safe_pointee(ccx.layout_of(mt.ty));
+                }
+                _ => {}
+            }
+
+            // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
+            // with align attributes, and those calls later block optimizations.
+            if !is_return {
+                arg.attrs.pointee_align = None;
+            }
+        };
+
         let arg_of = |ty: Ty<'tcx>, is_return: bool| {
             let mut arg = ArgType::new(ccx.layout_of(ty));
-            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
-                if scalar.is_bool() {
-                    arg.attrs.set(ArgAttribute::ZExt);
-                }
-            }
             if arg.layout.is_zst() {
                 // For some forsaken reason, x86_64-pc-windows-gnu
                 // doesn't ignore zero-sized struct arguments.
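As a rough sketch (not part of the diff) of the distinction the `safe_pointee` and `adjust_for_rust_type` logic above ends up drawing, assuming a 4-byte `i32` pointee; the function names here are hypothetical:

// `&i32` has a valid_range that excludes 0, so NonNull is set and
// apply_llfn/apply_callsite emit `dereferenceable(4)` for the parameter.
pub fn takes_ref(_x: &i32) {}

// `Option<&i32>` uses the null pointer to encode `None`, so its valid_range
// starts at 0, NonNull is not set, and the weaker `dereferenceable_or_null(4)`
// is emitted instead (the pointee type is recovered via the NicheFilling case).
pub fn takes_opt_ref(_x: Option<&i32>) {}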
@@ -710,107 +840,27 @@ impl<'a, 'tcx> FnType<'tcx> {
                     arg.ignore();
                 }
             }
 
+            // FIXME(eddyb) other ABIs don't have logic for nested.
+            if !is_return && type_is_fat_ptr(ccx, arg.layout.ty) && rust_abi {
+                arg.nested = vec![
+                    ArgType::new(arg.layout.field(ccx, 0)),
+                    ArgType::new(arg.layout.field(ccx, 1))
+                ];
+                adjust_for_rust_type(&mut arg.nested[0], false);
+                adjust_for_rust_type(&mut arg.nested[1], false);
+            } else {
+                adjust_for_rust_type(&mut arg, is_return);
+            }
+
             arg
         };
 
-        let ret_ty = sig.output();
-        let mut ret = arg_of(ret_ty, true);
-
-        if !type_is_fat_ptr(ccx, ret_ty) {
-            // The `noalias` attribute on the return value is useful to a
-            // function ptr caller.
-            if ret_ty.is_box() {
-                // `Box` pointer return values never alias because ownership
-                // is transferred
-                ret.attrs.set(ArgAttribute::NoAlias);
-            }
-
-            // We can also mark the return value as `dereferenceable` in certain cases
-            match ret_ty.sty {
-                // These are not really pointers but pairs, (pointer, len)
-                ty::TyRef(_, ty::TypeAndMut { ty, .. }) => {
-                    ret.attrs.set_dereferenceable(ccx.size_of(ty));
-                }
-                ty::TyAdt(def, _) if def.is_box() => {
-                    ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty()));
-                }
-                _ => {}
-            }
-        }
-
-        let mut args = Vec::with_capacity(inputs.len() + extra_args.len());
-
-        // Handle safe Rust thin and fat pointers.
-        let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty {
-            // `Box` pointer parameters never alias because ownership is transferred
-            ty::TyAdt(def, _) if def.is_box() => {
-                arg.attrs.set(ArgAttribute::NoAlias);
-                Some(ty.boxed_ty())
-            }
-
-            ty::TyRef(_, mt) => {
-                // `&mut` pointer parameters never alias other parameters, or mutable global data
-                //
-                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
-                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
-                // on memory dependencies rather than pointer equality
-                let is_freeze = ccx.shared().type_is_freeze(mt.ty);
-
-                let no_alias_is_safe =
-                    if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias ||
-                       ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort {
-                        // Mutable references or immutable shared references
-                        mt.mutbl == hir::MutMutable || is_freeze
-                    } else {
-                        // Only immutable shared references
-                        mt.mutbl != hir::MutMutable && is_freeze
-                    };
-
-                if no_alias_is_safe {
-                    arg.attrs.set(ArgAttribute::NoAlias);
-                }
-
-                if mt.mutbl == hir::MutImmutable && is_freeze {
-                    arg.attrs.set(ArgAttribute::ReadOnly);
-                }
-
-                Some(mt.ty)
-            }
-            _ => None
-        };
-
-        for ty in inputs.iter().chain(extra_args.iter()) {
-            let mut arg = arg_of(ty, false);
-
-            if type_is_fat_ptr(ccx, ty) {
-                let mut data = ArgType::new(arg.layout.field(ccx, 0));
-                let mut info = ArgType::new(arg.layout.field(ccx, 1));
-
-                if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
-                    data.attrs.set(ArgAttribute::NonNull);
-                    if ccx.tcx().struct_tail(inner).is_trait() {
-                        // vtables can be safely marked non-null, readonly
-                        // and noalias.
-                        info.attrs.set(ArgAttribute::NonNull);
-                        info.attrs.set(ArgAttribute::ReadOnly);
-                        info.attrs.set(ArgAttribute::NoAlias);
-                    }
-                }
-                // FIXME(eddyb) other ABIs don't have logic for nested.
-                if rust_abi {
-                    arg.nested = vec![data, info];
-                }
-            } else {
-                if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
-                    arg.attrs.set_dereferenceable(ccx.size_of(inner));
-                }
-            }
-            args.push(arg);
-        }
-
         FnType {
-            args,
-            ret,
+            ret: arg_of(sig.output(), true),
+            args: inputs.iter().chain(extra_args.iter()).map(|ty| {
+                arg_of(ty, false)
+            }).collect(),
             variadic: sig.variadic,
             cconv,
         }
@@ -178,6 +178,22 @@ extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned Index,
 #endif
 }
 
+extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr,
+                                                 unsigned Index,
+                                                 uint32_t Bytes) {
+  CallSite Call = CallSite(unwrap<Instruction>(Instr));
+  AttrBuilder B;
+  B.addAlignmentAttr(Bytes);
+#if LLVM_VERSION_GE(5, 0)
+  Call.setAttributes(Call.getAttributes().addAttributes(
+      Call->getContext(), Index, B));
+#else
+  Call.setAttributes(Call.getAttributes().addAttributes(
+      Call->getContext(), Index,
+      AttributeSet::get(Call->getContext(), Index, B)));
+#endif
+}
+
 extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
                                                        unsigned Index,
                                                        uint64_t Bytes) {
@@ -194,6 +210,22 @@ extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
 #endif
 }
 
+extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr,
+                                                             unsigned Index,
+                                                             uint64_t Bytes) {
+  CallSite Call = CallSite(unwrap<Instruction>(Instr));
+  AttrBuilder B;
+  B.addDereferenceableOrNullAttr(Bytes);
+#if LLVM_VERSION_GE(5, 0)
+  Call.setAttributes(Call.getAttributes().addAttributes(
+      Call->getContext(), Index, B));
+#else
+  Call.setAttributes(Call.getAttributes().addAttributes(
+      Call->getContext(), Index,
+      AttributeSet::get(Call->getContext(), Index, B)));
+#endif
+}
+
 extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
                                              LLVMRustAttribute RustAttr) {
   Function *A = unwrap<Function>(Fn);
@@ -206,6 +238,19 @@ extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
 #endif
 }
 
+extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn,
+                                         unsigned Index,
+                                         uint32_t Bytes) {
+  Function *A = unwrap<Function>(Fn);
+  AttrBuilder B;
+  B.addAlignmentAttr(Bytes);
+#if LLVM_VERSION_GE(5, 0)
+  A->addAttributes(Index, B);
+#else
+  A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
+#endif
+}
+
 extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index,
                                                uint64_t Bytes) {
   Function *A = unwrap<Function>(Fn);
@@ -218,6 +263,19 @@ extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index,
 #endif
 }
 
+extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn,
+                                                     unsigned Index,
+                                                     uint64_t Bytes) {
+  Function *A = unwrap<Function>(Fn);
+  AttrBuilder B;
+  B.addDereferenceableOrNullAttr(Bytes);
+#if LLVM_VERSION_GE(5, 0)
+  A->addAttributes(Index, B);
+#else
+  A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
+#endif
+}
+
 extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn,
                                                    unsigned Index,
                                                    const char *Name,
@@ -15,7 +15,7 @@
 #![feature(custom_attribute)]
 
 pub struct S {
-    _field: [i64; 4],
+    _field: [i32; 8],
 }
 
 pub struct UnsafeInner {
@@ -66,7 +66,7 @@ pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) {
 pub fn mutable_borrow(_: &mut i32) {
 }
 
-// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0)
+// CHECK: @indirect_struct(%S* noalias nocapture align 4 dereferenceable(32) %arg0)
 #[no_mangle]
 pub fn indirect_struct(_: S) {
 }
@@ -77,17 +77,17 @@ pub fn indirect_struct(_: S) {
 pub fn borrowed_struct(_: &S) {
 }
 
-// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x)
+// CHECK: noalias align 4 dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x)
 #[no_mangle]
 pub fn _box(x: Box<i32>) -> Box<i32> {
     x
 }
 
-// CHECK: @struct_return(%S* noalias nocapture sret dereferenceable(32))
+// CHECK: @struct_return(%S* noalias nocapture sret align 4 dereferenceable(32))
 #[no_mangle]
 pub fn struct_return() -> S {
     S {
-        _field: [0, 0, 0, 0]
+        _field: [0, 0, 0, 0, 0, 0, 0, 0]
     }
 }
@@ -39,7 +39,7 @@ pub struct BigPacked {
 #[no_mangle]
 pub fn call_pkd(f: fn() -> Array) -> BigPacked {
     // CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
-    // CHECK: call void %{{.*}}(%Array* noalias nocapture sret dereferenceable(32) [[ALLOCA]])
+    // CHECK: call void %{{.*}}(%Array* noalias nocapture sret align 4 dereferenceable(32) [[ALLOCA]])
     // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 32, i32 1, i1 false)
     // check that calls whose destination is a field of a packed struct
     // go through an alloca rather than calling the function with an