1
Fork 0

rustc_codegen_llvm: use safe references for Value.

This commit is contained in:
Irina Popa 2018-07-10 13:28:39 +03:00
parent 8d17684341
commit f375185314
28 changed files with 1214 additions and 1230 deletions

View file

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef, AttributePlace};
use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use common::{ty_fn_sig, C_usize};
@ -17,6 +17,7 @@ use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;
use rustc_target::abi::{LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty};
@ -46,12 +47,12 @@ impl ArgAttributeExt for ArgAttribute {
}
pub trait ArgAttributesExt {
fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef);
fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef);
fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}
impl ArgAttributesExt for ArgAttributes {
fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
let mut regular = self.regular;
unsafe {
let deref = self.pointee_size.bytes();
@ -76,7 +77,7 @@ impl ArgAttributesExt for ArgAttributes {
}
}
fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
let mut regular = self.regular;
unsafe {
let deref = self.pointee_size.bytes();
@ -164,16 +165,16 @@ impl LlvmType for CastTarget {
}
}
pub trait ArgTypeExt<'a, 'tcx> {
fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
fn store(&self, bx: &Builder<'a, 'll, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>);
fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>);
pub trait ArgTypeExt<'ll, 'tcx> {
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>);
fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>);
}
impl<'a, 'tcx> ArgTypeExt<'a, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
/// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
self.layout.llvm_type(cx)
}
@ -181,7 +182,7 @@ impl<'a, 'tcx> ArgTypeExt<'a, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
/// place for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
fn store(&self, bx: &Builder<'a, 'll, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) {
if self.is_ignore() {
return;
}
@ -234,7 +235,7 @@ impl<'a, 'tcx> ArgTypeExt<'a, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
}
}
fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) {
let mut next = || {
let val = llvm::get_param(bx.llfn(), *idx as c_uint);
*idx += 1;
@ -252,32 +253,32 @@ impl<'a, 'tcx> ArgTypeExt<'a, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
}
}
pub trait FnTypeExt<'a, 'tcx> {
fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
pub trait FnTypeExt<'tcx> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>)
-> Self;
fn new(cx: &CodegenCx<'a, 'tcx>,
fn new(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn new_internal(
cx: &CodegenCx<'a, 'tcx>,
cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
) -> Self;
fn adjust_for_abi(&mut self,
cx: &CodegenCx<'a, 'tcx>,
cx: &CodegenCx<'ll, 'tcx>,
abi: Abi);
fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn llvm_cconv(&self) -> llvm::CallConv;
fn apply_attrs_llfn(&self, llfn: ValueRef);
fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: ValueRef);
fn apply_attrs_llfn(&self, llfn: &'ll Value);
fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}
impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>)
-> Self {
let fn_ty = instance.ty(cx.tcx);
let sig = ty_fn_sig(cx, fn_ty);
@ -285,7 +286,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
FnType::new(cx, sig, &[])
}
fn new(cx: &CodegenCx<'a, 'tcx>,
fn new(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
FnType::new_internal(cx, sig, extra_args, |ty, _| {
@ -293,7 +294,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
})
}
fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
@ -316,7 +317,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
}
fn new_internal(
cx: &CodegenCx<'a, 'tcx>,
cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
@ -497,7 +498,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
}
fn adjust_for_abi(&mut self,
cx: &CodegenCx<'a, 'tcx>,
cx: &CodegenCx<'ll, 'tcx>,
abi: Abi) {
if abi == Abi::Unadjusted { return }
@ -564,7 +565,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
}
}
fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
let args_capacity: usize = self.args.iter().map(|arg|
if arg.pad.is_some() { 1 } else { 0 } +
if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
@ -629,7 +630,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
}
}
fn apply_attrs_llfn(&self, llfn: ValueRef) {
fn apply_attrs_llfn(&self, llfn: &'ll Value) {
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
@ -659,7 +660,7 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
}
}
fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: ValueRef) {
fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);

View file

@ -8,11 +8,12 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef};
use llvm;
use common::*;
use type_::Type;
use type_of::LayoutLlvmExt;
use builder::Builder;
use value::Value;
use rustc::hir;
@ -27,8 +28,8 @@ use libc::{c_uint, c_char};
pub fn codegen_inline_asm(
bx: &Builder<'a, 'll, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<PlaceRef<'tcx>>,
mut inputs: Vec<ValueRef>
outputs: Vec<PlaceRef<'ll, 'tcx>>,
mut inputs: Vec<&'ll Value>
) {
let mut ext_constraints = vec![];
let mut output_types = vec![];
@ -111,7 +112,7 @@ pub fn codegen_inline_asm(
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: llvm::ValueRef = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1));

View file

@ -22,15 +22,17 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_target::spec::PanicStrategy;
use attributes;
use llvm::{self, Attribute, ValueRef};
use llvm::{self, Attribute};
use llvm::AttributePlace::Function;
use llvm_util;
pub use syntax::attr::{self, InlineAttr};
use context::CodegenCx;
use value::Value;
/// Mark LLVM function to use provided inline heuristic.
#[inline]
pub fn inline(val: ValueRef, inline: InlineAttr) {
pub fn inline(val: &'ll Value, inline: InlineAttr) {
use self::InlineAttr::*;
match inline {
Hint => Attribute::InlineHint.apply_llfn(Function, val),
@ -46,30 +48,30 @@ pub fn inline(val: ValueRef, inline: InlineAttr) {
/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
#[inline]
pub fn emit_uwtable(val: ValueRef, emit: bool) {
pub fn emit_uwtable(val: &'ll Value, emit: bool) {
Attribute::UWTable.toggle_llfn(Function, val, emit);
}
/// Tell LLVM whether the function can or cannot unwind.
#[inline]
pub fn unwind(val: ValueRef, can_unwind: bool) {
pub fn unwind(val: &'ll Value, can_unwind: bool) {
Attribute::NoUnwind.toggle_llfn(Function, val, !can_unwind);
}
/// Tell LLVM whether it should optimize function for size.
#[inline]
#[allow(dead_code)] // possibly useful function
pub fn set_optimize_for_size(val: ValueRef, optimize: bool) {
pub fn set_optimize_for_size(val: &'ll Value, optimize: bool) {
Attribute::OptimizeForSize.toggle_llfn(Function, val, optimize);
}
/// Tell LLVM if this function should be 'naked', i.e. skip the epilogue and prologue.
#[inline]
pub fn naked(val: ValueRef, is_naked: bool) {
pub fn naked(val: &'ll Value, is_naked: bool) {
Attribute::Naked.toggle_llfn(Function, val, is_naked);
}
pub fn set_frame_pointer_elimination(cx: &CodegenCx, llfn: ValueRef) {
pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
if cx.sess().must_not_eliminate_frame_pointers() {
llvm::AddFunctionAttrStringValue(
llfn, llvm::AttributePlace::Function,
@ -77,7 +79,7 @@ pub fn set_frame_pointer_elimination(cx: &CodegenCx, llfn: ValueRef) {
}
}
pub fn set_probestack(cx: &CodegenCx, llfn: ValueRef) {
pub fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
// Only use stack probes if the target specification indicates that we
// should be using stack probes
if !cx.sess().target.target.options.stack_probes {
@ -123,7 +125,7 @@ pub fn llvm_target_features(sess: &Session) -> impl Iterator<Item = &str> {
/// Composite function which sets LLVM attributes for function depending on its AST (#[attribute])
/// attributes.
pub fn from_fn_attrs(cx: &CodegenCx, llfn: ValueRef, id: DefId) {
pub fn from_fn_attrs(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value, id: DefId) {
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(id);
inline(llfn, codegen_fn_attrs.inline);

View file

@ -17,7 +17,7 @@
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a ValueRef. Doing so
//! * There's no way to find out the Ty type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty,
//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int,
@ -31,8 +31,7 @@ use super::ModuleKind;
use abi;
use back::link;
use back::write::{self, OngoingCodegen};
use llvm::{TypeKind, ValueRef, get_param};
use llvm;
use llvm::{self, TypeKind, get_param};
use metadata;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::middle::lang_items::StartFnLangItem;
@ -87,6 +86,8 @@ use syntax_pos::symbol::InternedString;
use syntax::attr;
use rustc::hir::{self, CodegenFnAttrs};
use value::Value;
use mir::operand::OperandValue;
use rustc_codegen_utils::check_for_rustc_errors_attr;
@ -157,12 +158,12 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> llvm::RealPredicate {
pub fn compare_simd_types(
bx: &Builder<'a, 'll, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
lhs: &'ll Value,
rhs: &'ll Value,
t: Ty<'tcx>,
ret_ty: &'ll Type,
op: hir::BinOpKind
) -> ValueRef {
) -> &'ll Value {
let signed = match t.sty {
ty::TyFloat(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
@ -187,11 +188,12 @@ pub fn compare_simd_types(
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
pub fn unsized_info<'cx, 'tcx>(cx: &CodegenCx<'cx, 'tcx>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<ValueRef>)
-> ValueRef {
pub fn unsized_info(
cx: &CodegenCx<'ll, 'tcx>,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<&'ll Value>,
) -> &'ll Value {
let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::TyArray(_, len), &ty::TySlice(_)) => {
@ -218,10 +220,10 @@ pub fn unsized_info<'cx, 'tcx>(cx: &CodegenCx<'cx, 'tcx>,
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr(
bx: &Builder<'a, 'll, 'tcx>,
src: ValueRef,
src: &'ll Value,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>
) -> (ValueRef, ValueRef) {
) -> (&'ll Value, &'ll Value) {
debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
match (&src_ty.sty, &dst_ty.sty) {
(&ty::TyRef(_, a, _),
@ -273,8 +275,8 @@ pub fn unsize_thin_ptr(
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into(
bx: &Builder<'a, 'll, 'tcx>,
src: PlaceRef<'tcx>,
dst: PlaceRef<'tcx>
src: PlaceRef<'ll, 'tcx>,
dst: PlaceRef<'ll, 'tcx>
) {
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
@ -331,19 +333,19 @@ pub fn coerce_unsized_into(
}
pub fn cast_shift_expr_rhs(
cx: &Builder, op: hir::BinOpKind, lhs: ValueRef, rhs: ValueRef
) -> ValueRef {
cx: &Builder<'_, 'll, '_>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value
) -> &'ll Value {
cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b))
}
fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind,
lhs: ValueRef,
rhs: ValueRef,
lhs: &'ll Value,
rhs: &'ll Value,
trunc: F,
zext: G)
-> ValueRef
where F: FnOnce(ValueRef, &'ll Type) -> ValueRef,
G: FnOnce(ValueRef, &'ll Type) -> ValueRef
-> &'ll Value
where F: FnOnce(&'ll Value, &'ll Type) -> &'ll Value,
G: FnOnce(&'ll Value, &'ll Type) -> &'ll Value
{
// Shifts may have any size int on the rhs
if op.is_shift() {
@ -380,12 +382,12 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
sess.target.target.options.is_like_msvc
}
pub fn call_assume(bx: &Builder<'a, 'll, 'tcx>, val: ValueRef) {
pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) {
let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume");
bx.call(assume_intrinsic, &[val], None);
}
pub fn from_immediate(bx: &Builder, val: ValueRef) -> ValueRef {
pub fn from_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value) -> &'ll Value {
if val_ty(val) == Type::i1(bx.cx) {
bx.zext(val, Type::i8(bx.cx))
} else {
@ -393,26 +395,28 @@ pub fn from_immediate(bx: &Builder, val: ValueRef) -> ValueRef {
}
}
pub fn to_immediate(bx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
pub fn to_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value, layout: layout::TyLayout) -> &'ll Value {
if let layout::Abi::Scalar(ref scalar) = layout.abi {
return to_immediate_scalar(bx, val, scalar);
}
val
}
pub fn to_immediate_scalar(bx: &Builder, val: ValueRef, scalar: &layout::Scalar) -> ValueRef {
pub fn to_immediate_scalar(bx: &Builder<'_, 'll, '_>, val: &'ll Value, scalar: &layout::Scalar) -> &'ll Value {
if scalar.is_bool() {
return bx.trunc(val, Type::i1(bx.cx));
}
val
}
pub fn call_memcpy(bx: &Builder,
dst: ValueRef,
src: ValueRef,
n_bytes: ValueRef,
align: Align,
flags: MemFlags) {
pub fn call_memcpy(
bx: &Builder<'_, 'll, '_>,
dst: &'ll Value,
src: &'ll Value,
n_bytes: &'ll Value,
align: Align,
flags: MemFlags,
) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let val = bx.load(src, align);
@ -433,9 +437,9 @@ pub fn call_memcpy(bx: &Builder,
}
pub fn memcpy_ty(
bx: &Builder<'a, 'll, 'tcx>,
dst: ValueRef,
src: ValueRef,
bx: &Builder<'_, 'll, 'tcx>,
dst: &'ll Value,
src: &'ll Value,
layout: TyLayout<'tcx>,
align: Align,
flags: MemFlags,
@ -449,13 +453,13 @@ pub fn memcpy_ty(
}
pub fn call_memset(
bx: &Builder<'a, 'll, 'tcx>,
ptr: ValueRef,
fill_byte: ValueRef,
size: ValueRef,
align: ValueRef,
bx: &Builder<'_, 'll, '_>,
ptr: &'ll Value,
fill_byte: &'ll Value,
size: &'ll Value,
align: &'ll Value,
volatile: bool,
) -> ValueRef {
) -> &'ll Value {
let ptr_width = &bx.cx.sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key);
@ -514,7 +518,7 @@ pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'
mir::codegen_mir(cx, lldecl, &mir, instance, sig);
}
pub fn set_link_section(llval: ValueRef, attrs: &CodegenFnAttrs) {
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
@ -552,11 +556,13 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) {
None => {} // Do nothing.
}
fn create_entry_fn<'cx>(cx: &'cx CodegenCx,
sp: Span,
rust_main: ValueRef,
rust_main_def_id: DefId,
use_start_lang_item: bool) {
fn create_entry_fn(
cx: &CodegenCx<'ll, '_>,
sp: Span,
rust_main: &'ll Value,
rust_main_def_id: DefId,
use_start_lang_item: bool,
) {
let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx));
let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output();
@ -678,26 +684,24 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>,
return metadata;
}
pub struct ValueIter {
cur: ValueRef,
step: unsafe extern "C" fn(ValueRef) -> ValueRef,
pub struct ValueIter<'ll> {
cur: Option<&'ll Value>,
step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
}
impl Iterator for ValueIter {
type Item = ValueRef;
impl Iterator for ValueIter<'ll> {
type Item = &'ll Value;
fn next(&mut self) -> Option<ValueRef> {
fn next(&mut self) -> Option<&'ll Value> {
let old = self.cur;
if !old.is_null() {
if let Some(old) = old {
self.cur = unsafe { (self.step)(old) };
Some(old)
} else {
None
}
old
}
}
pub fn iter_globals(llmod: &llvm::Module) -> ValueIter {
pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
unsafe {
ValueIter {
cur: llvm::LLVMGetFirstGlobal(llmod),

File diff suppressed because it is too large Load diff

View file

@ -18,9 +18,10 @@ use attributes;
use common::{self, CodegenCx};
use consts;
use declare;
use llvm::{self, ValueRef};
use llvm;
use monomorphize::Instance;
use type_of::LayoutLlvmExt;
use value::Value;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, TypeFoldable};
@ -34,10 +35,10 @@ use rustc::ty::subst::Substs;
///
/// - `cx`: the crate context
/// - `instance`: the instance to be instantiated
pub fn get_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
instance: Instance<'tcx>)
-> ValueRef
{
pub fn get_fn(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll Value {
let tcx = cx.tcx;
debug!("get_fn(instance={:?})", instance);
@ -204,11 +205,11 @@ pub fn get_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
llfn
}
pub fn resolve_and_get_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>)
-> ValueRef
{
pub fn resolve_and_get_fn(
cx: &CodegenCx<'ll, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
) -> &'ll Value {
get_fn(
cx,
ty::Instance::resolve(

View file

@ -12,8 +12,7 @@
//! Code that is useful in various codegen modules.
use llvm;
use llvm::{ValueRef, TypeKind};
use llvm::{self, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::LangItem;
@ -25,6 +24,7 @@ use declare;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc::hir;
@ -90,20 +90,20 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bo
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
pub struct Funclet {
cleanuppad: ValueRef,
pub struct Funclet<'ll> {
cleanuppad: &'ll Value,
operand: OperandBundleDef,
}
impl Funclet {
pub fn new(cleanuppad: ValueRef) -> Funclet {
impl Funclet<'ll> {
pub fn new(cleanuppad: &'ll Value) -> Self {
Funclet {
cleanuppad,
operand: OperandBundleDef::new("funclet", &[cleanuppad]),
}
}
pub fn cleanuppad(&self) -> ValueRef {
pub fn cleanuppad(&self) -> &'ll Value {
self.cleanuppad
}
@ -112,62 +112,61 @@ impl Funclet {
}
}
// TODO: use proper lifetime in return type
pub fn val_ty(v: ValueRef) -> &'static Type {
pub fn val_ty(v: &'ll Value) -> &'ll Type {
unsafe {
llvm::LLVMTypeOf(&*v)
llvm::LLVMTypeOf(v)
}
}
// LLVM constant constructors.
pub fn C_null(t: &Type) -> ValueRef {
pub fn C_null(t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstNull(t)
}
}
pub fn C_undef(t: &Type) -> ValueRef {
pub fn C_undef(t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMGetUndef(t)
}
}
pub fn C_int(t: &Type, i: i64) -> ValueRef {
pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i as u64, True)
}
}
pub fn C_uint(t: &Type, i: u64) -> ValueRef {
pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i, False)
}
}
pub fn C_uint_big(t: &Type, u: u128) -> ValueRef {
pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
pub fn C_bool(cx: &CodegenCx, val: bool) -> ValueRef {
pub fn C_bool(cx: &CodegenCx<'ll, '_>, val: bool) -> &'ll Value {
C_uint(Type::i1(cx), val as u64)
}
pub fn C_i32(cx: &CodegenCx, i: i32) -> ValueRef {
pub fn C_i32(cx: &CodegenCx<'ll, '_>, i: i32) -> &'ll Value {
C_int(Type::i32(cx), i as i64)
}
pub fn C_u32(cx: &CodegenCx, i: u32) -> ValueRef {
pub fn C_u32(cx: &CodegenCx<'ll, '_>, i: u32) -> &'ll Value {
C_uint(Type::i32(cx), i as u64)
}
pub fn C_u64(cx: &CodegenCx, i: u64) -> ValueRef {
pub fn C_u64(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value {
C_uint(Type::i64(cx), i)
}
pub fn C_usize(cx: &CodegenCx, i: u64) -> ValueRef {
pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value {
let bit_size = cx.data_layout().pointer_size.bits();
if bit_size < 64 {
// make sure it doesn't overflow
@ -177,14 +176,14 @@ pub fn C_usize(cx: &CodegenCx, i: u64) -> ValueRef {
C_uint(cx.isize_ty, i)
}
pub fn C_u8(cx: &CodegenCx, i: u8) -> ValueRef {
pub fn C_u8(cx: &CodegenCx<'ll, '_>, i: u8) -> &'ll Value {
C_uint(Type::i8(cx), i as u64)
}
// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
pub fn C_cstr(cx: &CodegenCx, s: LocalInternedString, null_terminated: bool) -> ValueRef {
pub fn C_cstr(cx: &CodegenCx<'ll, '_>, s: LocalInternedString, null_terminated: bool) -> &'ll Value {
unsafe {
if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) {
return llval;
@ -209,24 +208,24 @@ pub fn C_cstr(cx: &CodegenCx, s: LocalInternedString, null_terminated: bool) ->
// NB: Do not use `do_spill_noroot` to make this into a constant string, or
// you will be kicked off fast isel. See issue #4352 for an example of this.
pub fn C_str_slice(cx: &CodegenCx, s: LocalInternedString) -> ValueRef {
pub fn C_str_slice(cx: &CodegenCx<'ll, '_>, s: LocalInternedString) -> &'ll Value {
let len = s.len();
let cs = consts::ptrcast(C_cstr(cx, s, false),
cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to());
C_fat_ptr(cx, cs, C_usize(cx, len as u64))
}
pub fn C_fat_ptr(cx: &CodegenCx, ptr: ValueRef, meta: ValueRef) -> ValueRef {
pub fn C_fat_ptr(cx: &CodegenCx<'ll, '_>, ptr: &'ll Value, meta: &'ll Value) -> &'ll Value {
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
C_struct(cx, &[ptr, meta], false)
}
pub fn C_struct(cx: &CodegenCx, elts: &[ValueRef], packed: bool) -> ValueRef {
pub fn C_struct(cx: &CodegenCx<'ll, '_>, elts: &[&'ll Value], packed: bool) -> &'ll Value {
C_struct_in_context(cx.llcx, elts, packed)
}
pub fn C_struct_in_context(llcx: &llvm::Context, elts: &[ValueRef], packed: bool) -> ValueRef {
pub fn C_struct_in_context(llcx: &'ll llvm::Context, elts: &[&'ll Value], packed: bool) -> &'ll Value {
unsafe {
llvm::LLVMConstStructInContext(llcx,
elts.as_ptr(), elts.len() as c_uint,
@ -234,43 +233,43 @@ pub fn C_struct_in_context(llcx: &llvm::Context, elts: &[ValueRef], packed: bool
}
}
pub fn C_array(ty: &Type, elts: &[ValueRef]) -> ValueRef {
pub fn C_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint);
}
}
pub fn C_vector(elts: &[ValueRef]) -> ValueRef {
pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
}
}
pub fn C_bytes(cx: &CodegenCx, bytes: &[u8]) -> ValueRef {
pub fn C_bytes(cx: &CodegenCx<'ll, '_>, bytes: &[u8]) -> &'ll Value {
C_bytes_in_context(cx.llcx, bytes)
}
pub fn C_bytes_in_context(llcx: &llvm::Context, bytes: &[u8]) -> ValueRef {
pub fn C_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
}
}
pub fn const_get_elt(v: ValueRef, idx: u64) -> ValueRef {
pub fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value {
unsafe {
assert_eq!(idx as c_uint as u64, idx);
let us = &[idx as c_uint];
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={:?}, idx={}, r={:?})",
Value(v), idx, Value(r));
v, idx, r);
r
}
}
pub fn const_get_real(v: ValueRef) -> Option<(f64, bool)> {
pub fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> {
unsafe {
if is_const_real(v) {
let mut loses_info: llvm::Bool = ::std::mem::uninitialized();
@ -283,21 +282,21 @@ pub fn const_get_real(v: ValueRef) -> Option<(f64, bool)> {
}
}
pub fn const_to_uint(v: ValueRef) -> u64 {
pub fn const_to_uint(v: &'ll Value) -> u64 {
unsafe {
llvm::LLVMConstIntGetZExtValue(v)
}
}
pub fn is_const_integral(v: ValueRef) -> bool {
pub fn is_const_integral(v: &'ll Value) -> bool {
unsafe {
!llvm::LLVMIsAConstantInt(v).is_null()
llvm::LLVMIsAConstantInt(v).is_some()
}
}
pub fn is_const_real(v: ValueRef) -> bool {
pub fn is_const_real(v: &'ll Value) -> bool {
unsafe {
!llvm::LLVMIsAConstantFP(v).is_null()
llvm::LLVMIsAConstantFP(v).is_some()
}
}
@ -307,7 +306,7 @@ fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
((hi as u128) << 64) | (lo as u128)
}
pub fn const_to_opt_u128(v: ValueRef, sign_ext: bool) -> Option<u128> {
pub fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option<u128> {
unsafe {
if is_const_integral(v) {
let (mut lo, mut hi) = (0u64, 0u64);
@ -348,9 +347,9 @@ pub fn langcall(tcx: TyCtxt,
pub fn build_unchecked_lshift(
bx: &Builder<'a, 'll, 'tcx>,
lhs: ValueRef,
rhs: ValueRef
) -> ValueRef {
lhs: &'ll Value,
rhs: &'ll Value
) -> &'ll Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
@ -358,8 +357,8 @@ pub fn build_unchecked_lshift(
}
pub fn build_unchecked_rshift(
bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
) -> ValueRef {
bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value
) -> &'ll Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
@ -371,7 +370,7 @@ pub fn build_unchecked_rshift(
}
}
fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: ValueRef) -> ValueRef {
fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value {
let rhs_llty = val_ty(rhs);
bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
}
@ -381,7 +380,7 @@ pub fn shift_mask_val(
llty: &'ll Type,
mask_llty: &'ll Type,
invert: bool
) -> ValueRef {
) -> &'ll Value {
let kind = llty.kind();
match kind {
TypeKind::Integer => {

View file

@ -9,9 +9,7 @@
// except according to those terms.
use libc::c_uint;
use llvm;
use llvm::{SetUnnamedAddr};
use llvm::{ValueRef, True};
use llvm::{self, SetUnnamedAddr, True};
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
use debuginfo;
@ -24,27 +22,29 @@ use syntax_pos::Span;
use syntax_pos::symbol::LocalInternedString;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Align, LayoutOf};
use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
use std::ffi::{CStr, CString};
pub fn ptrcast(val: ValueRef, ty: &Type) -> ValueRef {
pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstPointerCast(val, ty)
}
}
pub fn bitcast(val: ValueRef, ty: &Type) -> ValueRef {
pub fn bitcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstBitCast(val, ty)
}
}
fn set_global_alignment(cx: &CodegenCx,
gv: ValueRef,
fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
gv: &'ll Value,
mut align: Align) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
@ -62,11 +62,12 @@ fn set_global_alignment(cx: &CodegenCx,
}
}
pub fn addr_of_mut(cx: &CodegenCx,
cv: ValueRef,
align: Align,
kind: &str)
-> ValueRef {
pub fn addr_of_mut(
cx: &CodegenCx<'ll, '_>,
cv: &'ll Value,
align: Align,
kind: &str,
) -> &'ll Value {
unsafe {
let name = cx.generate_local_symbol_name(kind);
let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{
@ -80,11 +81,12 @@ pub fn addr_of_mut(cx: &CodegenCx,
}
}
pub fn addr_of(cx: &CodegenCx,
cv: ValueRef,
align: Align,
kind: &str)
-> ValueRef {
pub fn addr_of(
cx: &CodegenCx<'ll, '_>,
cv: &'ll Value,
align: Align,
kind: &str,
) -> &'ll Value {
if let Some(&gv) = cx.const_globals.borrow().get(&cv) {
unsafe {
// Upgrade the alignment in cases where the same constant is used with different
@ -104,7 +106,7 @@ pub fn addr_of(cx: &CodegenCx,
gv
}
pub fn get_static(cx: &CodegenCx, def_id: DefId) -> ValueRef {
pub fn get_static(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll Value {
let instance = Instance::mono(cx.tcx, def_id);
if let Some(&g) = cx.instances.borrow().get(&instance) {
return g;
@ -213,13 +215,13 @@ pub fn get_static(cx: &CodegenCx, def_id: DefId) -> ValueRef {
g
}
fn check_and_apply_linkage<'tcx>(
cx: &CodegenCx<'_, 'tcx>,
fn check_and_apply_linkage(
cx: &CodegenCx<'ll, 'tcx>,
attrs: &CodegenFnAttrs,
ty: Ty<'tcx>,
sym: LocalInternedString,
span: Option<Span>
) -> ValueRef {
) -> &'ll Value {
let llty = cx.layout_of(ty).llvm_type(cx);
if let Some(linkage) = attrs.linkage {
debug!("get_static: sym={} linkage={:?}", sym, linkage);

View file

@ -10,7 +10,6 @@
use common;
use llvm;
use llvm::ValueRef;
use rustc::dep_graph::DepGraphSafe;
use rustc::hir;
use rustc::hir::def_id::DefId;
@ -19,6 +18,7 @@ use callee;
use base;
use declare;
use monomorphize::Instance;
use value::Value;
use monomorphize::partitioning::CodegenUnit;
use type_::Type;
@ -56,38 +56,38 @@ pub struct CodegenCx<'a, 'tcx: 'a> {
pub codegen_unit: Arc<CodegenUnit<'tcx>>,
/// Cache instances of monomorphic and polymorphic items
pub instances: RefCell<FxHashMap<Instance<'tcx>, ValueRef>>,
pub instances: RefCell<FxHashMap<Instance<'tcx>, &'a Value>>,
/// Cache generated vtables
pub vtables: RefCell<FxHashMap<(Ty<'tcx>,
Option<ty::PolyExistentialTraitRef<'tcx>>), ValueRef>>,
Option<ty::PolyExistentialTraitRef<'tcx>>), &'a Value>>,
/// Cache of constant strings,
pub const_cstr_cache: RefCell<FxHashMap<LocalInternedString, ValueRef>>,
pub const_cstr_cache: RefCell<FxHashMap<LocalInternedString, &'a Value>>,
/// Reverse-direction for const ptrs cast from globals.
/// Key is a ValueRef holding a *T,
/// Val is a ValueRef holding a *[T].
/// Key is a Value holding a *T,
/// Val is a Value holding a *[T].
///
/// Needed because LLVM loses pointer->pointee association
/// when we ptrcast, and we have to ptrcast during codegen
/// of a [T] const because we form a slice, a (*T,usize) pair, not
/// a pointer to an LLVM array type. Similar for trait objects.
pub const_unsized: RefCell<FxHashMap<ValueRef, ValueRef>>,
pub const_unsized: RefCell<FxHashMap<&'a Value, &'a Value>>,
/// Cache of emitted const globals (value -> global)
pub const_globals: RefCell<FxHashMap<ValueRef, ValueRef>>,
pub const_globals: RefCell<FxHashMap<&'a Value, &'a Value>>,
/// Mapping from static definitions to their DefId's.
pub statics: RefCell<FxHashMap<ValueRef, DefId>>,
pub statics: RefCell<FxHashMap<&'a Value, DefId>>,
/// List of globals for static variables which need to be passed to the
/// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
/// (We have to make sure we don't invalidate any ValueRefs referring
/// (We have to make sure we don't invalidate any Values referring
/// to constants.)
pub statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
pub statics_to_rauw: RefCell<Vec<(&'a Value, &'a Value)>>,
/// Statics that will be placed in the llvm.used variable
/// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
pub used_statics: RefCell<Vec<ValueRef>>,
pub used_statics: RefCell<Vec<&'a Value>>,
pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), &'a Type>>,
pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'a Type>>,
@ -96,11 +96,11 @@ pub struct CodegenCx<'a, 'tcx: 'a> {
pub dbg_cx: Option<debuginfo::CrateDebugContext<'a, 'tcx>>,
eh_personality: Cell<Option<ValueRef>>,
eh_unwind_resume: Cell<Option<ValueRef>>,
pub rust_try_fn: Cell<Option<ValueRef>>,
eh_personality: Cell<Option<&'a Value>>,
eh_unwind_resume: Cell<Option<&'a Value>>,
pub rust_try_fn: Cell<Option<&'a Value>>,
intrinsics: RefCell<FxHashMap<&'static str, ValueRef>>,
intrinsics: RefCell<FxHashMap<&'static str, &'a Value>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
@ -314,7 +314,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
&self.tcx.sess
}
pub fn get_intrinsic(&self, key: &str) -> ValueRef {
pub fn get_intrinsic(&self, key: &str) -> &'b Value {
if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
return v;
}
@ -338,7 +338,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
name
}
pub fn eh_personality(&self) -> ValueRef {
pub fn eh_personality(&self) -> &'b Value {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
@ -381,9 +381,9 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
llfn
}
// Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
// Returns a Value of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
pub fn eh_unwind_resume(&self) -> ValueRef {
pub fn eh_unwind_resume(&self) -> &'b Value {
use attributes;
let unwresume = &self.eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
@ -471,7 +471,7 @@ impl LayoutOf for &'a CodegenCx<'ll, 'tcx> {
}
/// Declare any llvm intrinsics that you might need
fn declare_intrinsic(cx: &CodegenCx, key: &str) -> Option<ValueRef> {
fn declare_intrinsic(cx: &CodegenCx<'ll, '_>, key: &str) -> Option<&'ll Value> {
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {

View file

@ -15,8 +15,9 @@ use llvm;
use common::{C_bytes, CodegenCx, C_i32};
use builder::Builder;
use declare;
use type_::Type;
use rustc::session::config::NoDebugInfo;
use type_::Type;
use value::Value;
use syntax::attr;
@ -39,8 +40,8 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
/// Allocates the global variable responsible for the .debug_gdb_scripts binary
/// section.
pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx)
-> llvm::ValueRef {
pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
-> &'ll Value {
let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
let section_var_name = &c_section_var_name[..c_section_var_name.len()-1];
@ -49,7 +50,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx)
c_section_var_name.as_ptr() as *const _)
};
if section_var.is_null() {
section_var.unwrap_or_else(|| {
let section_name = b".debug_gdb_scripts\0";
let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
@ -71,9 +72,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx)
llvm::LLVMSetAlignment(section_var, 1);
section_var
}
} else {
section_var
}
})
}
pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx) -> bool {

View file

@ -18,8 +18,9 @@ use super::namespace::mangled_name_of_instance;
use super::type_names::compute_debuginfo_type_name;
use super::{CrateDebugContext};
use abi;
use value::Value;
use llvm::{self, ValueRef};
use llvm;
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor,
DICompositeType, DILexicalBlock, DIFlags};
@ -890,7 +891,7 @@ pub fn compile_unit_metadata(tcx: TyCtxt,
return unit_metadata;
};
fn path_to_mdstring(llcx: &llvm::Context, path: &Path) -> llvm::ValueRef {
fn path_to_mdstring(llcx: &'ll llvm::Context, path: &Path) -> &'ll Value {
let path_str = path2cstr(path);
unsafe {
llvm::LLVMMDStringInContext(llcx,
@ -1679,9 +1680,11 @@ fn create_union_stub(
/// Creates debug information for the given global variable.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_global_var_metadata(cx: &CodegenCx,
def_id: DefId,
global: ValueRef) {
pub fn create_global_var_metadata(
cx: &CodegenCx<'ll, '_>,
def_id: DefId,
global: &'ll Value,
) {
if cx.dbg_cx.is_none() {
return;
}
@ -1759,9 +1762,11 @@ pub fn extend_scope_to_file(
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_vtable_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ty: ty::Ty<'tcx>,
vtable: ValueRef) {
pub fn create_vtable_metadata(
cx: &CodegenCx<'ll, 'tcx>,
ty: ty::Ty<'tcx>,
vtable: &'ll Value,
) {
if cx.dbg_cx.is_none() {
return;
}

View file

@ -21,7 +21,6 @@ use self::metadata::{type_metadata, file_metadata, TypeMap};
use self::source_loc::InternalDebugLocation::{self, UnknownLocation};
use llvm;
use llvm::ValueRef;
use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags};
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::{DefId, CrateNum};
@ -35,6 +34,7 @@ use rustc::ty::{self, ParamEnv, Ty, InstanceDef};
use rustc::mir;
use rustc::session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet};
use value::Value;
use libc::c_uint;
use std::cell::{Cell, RefCell};
@ -135,12 +135,12 @@ pub struct FunctionDebugContextData<'ll> {
pub defining_crate: CrateNum,
}
pub enum VariableAccess<'a> {
pub enum VariableAccess<'a, 'll> {
// The llptr given is an alloca containing the variable's value
DirectVariable { alloca: ValueRef },
DirectVariable { alloca: &'ll Value },
// The llptr given is an alloca containing the start of some pointer chain
// leading to the variable's content.
IndirectVariable { alloca: ValueRef, address_operations: &'a [i64] }
IndirectVariable { alloca: &'ll Value, address_operations: &'a [i64] }
}
pub enum VariableKind {
@ -204,7 +204,7 @@ pub fn create_function_debug_context(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
llfn: ValueRef,
llfn: &'ll Value,
mir: &mir::Mir,
) -> FunctionDebugContext<'ll> {
if cx.sess().opts.debuginfo == NoDebugInfo {
@ -482,7 +482,7 @@ pub fn declare_local(
variable_name: ast::Name,
variable_type: Ty<'tcx>,
scope_metadata: &'ll DIScope,
variable_access: VariableAccess,
variable_access: VariableAccess<'_, 'll>,
variable_kind: VariableKind,
span: Span,
) {

View file

@ -19,7 +19,6 @@ use llvm::debuginfo::DIScope;
use builder::Builder;
use libc::c_uint;
use std::ptr::NonNull;
use syntax_pos::{Span, Pos};
/// Sets the current debug location at the beginning of the span.
@ -96,7 +95,7 @@ pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDeb
debug!("setting debug location to {} {}", line, col);
unsafe {
NonNull::new(llvm::LLVMRustDIBuilderCreateDebugLocation(
Some(llvm::LLVMRustDIBuilderCreateDebugLocation(
debug_context(bx.cx).llcontext,
line as c_uint,
col_used,

View file

@ -16,11 +16,11 @@
//! Some useful guidelines:
//!
//! * Use declare_* family of methods if you are declaring, but are not
//! interested in defining the ValueRef they return.
//! * Use define_* family of methods when you might be defining the ValueRef.
//! interested in defining the Value they return.
//! * Use define_* family of methods when you might be defining the Value.
//! * When in doubt, define.
use llvm::{self, ValueRef};
use llvm;
use llvm::AttributePlace::Function;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, LayoutOf};
@ -39,8 +39,8 @@ use std::ffi::CString;
/// Declare a global value.
///
/// If there's a value with the same name already declared, the function will
/// return its ValueRef instead.
pub fn declare_global(cx: &CodegenCx, name: &str, ty: &Type) -> llvm::ValueRef {
/// return its Value instead.
pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value {
debug!("declare_global(name={:?})", name);
let namebuf = CString::new(name).unwrap_or_else(|_|{
bug!("name {:?} contains an interior null byte", name)
@ -54,8 +54,8 @@ pub fn declare_global(cx: &CodegenCx, name: &str, ty: &Type) -> llvm::ValueRef {
/// Declare a function.
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return existing ValueRef instead.
fn declare_raw_fn(cx: &CodegenCx, name: &str, callconv: llvm::CallConv, ty: &Type) -> ValueRef {
/// update the declaration and return existing Value instead.
fn declare_raw_fn(cx: &CodegenCx<'ll, '_>, name: &str, callconv: llvm::CallConv, ty: &'ll Type) -> &'ll Value {
debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
let namebuf = CString::new(name).unwrap_or_else(|_|{
bug!("name {:?} contains an interior null byte", name)
@ -114,8 +114,8 @@ fn declare_raw_fn(cx: &CodegenCx, name: &str, callconv: llvm::CallConv, ty: &Typ
/// `declare_fn` instead.
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return existing ValueRef instead.
pub fn declare_cfn(cx: &CodegenCx, name: &str, fn_type: &Type) -> ValueRef {
/// update the declaration and return existing Value instead.
pub fn declare_cfn(cx: &CodegenCx<'ll, '_>, name: &str, fn_type: &'ll Type) -> &'ll Value {
declare_raw_fn(cx, name, llvm::CCallConv, fn_type)
}
@ -123,9 +123,12 @@ pub fn declare_cfn(cx: &CodegenCx, name: &str, fn_type: &Type) -> ValueRef {
/// Declare a Rust function.
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return existing ValueRef instead.
pub fn declare_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, name: &str,
fn_type: Ty<'tcx>) -> ValueRef {
/// update the declaration and return existing Value instead.
pub fn declare_fn(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
fn_type: Ty<'tcx>,
) -> &'ll Value {
debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type);
let sig = common::ty_fn_sig(cx, fn_type);
let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
@ -154,7 +157,7 @@ pub fn declare_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, name: &str,
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to the user's fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
pub fn define_global(cx: &CodegenCx, name: &str, ty: &Type) -> Option<ValueRef> {
pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
if get_defined_value(cx, name).is_some() {
None
} else {
@ -167,9 +170,11 @@ pub fn define_global(cx: &CodegenCx, name: &str, ty: &Type) -> Option<ValueRef>
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
pub fn define_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
name: &str,
fn_type: Ty<'tcx>) -> ValueRef {
pub fn define_fn(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
fn_type: Ty<'tcx>,
) -> &'ll Value {
if get_defined_value(cx, name).is_some() {
cx.sess().fatal(&format!("symbol `{}` already defined", name))
} else {
@ -182,9 +187,11 @@ pub fn define_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
pub fn define_internal_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
name: &str,
fn_type: Ty<'tcx>) -> ValueRef {
pub fn define_internal_fn(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
fn_type: Ty<'tcx>,
) -> &'ll Value {
let llfn = define_fn(cx, name, fn_type);
unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
llfn
@ -192,24 +199,17 @@ pub fn define_internal_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
/// Get declared value by name.
pub fn get_declared_value(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
let namebuf = CString::new(name).unwrap_or_else(|_|{
bug!("name {:?} contains an interior null byte", name)
});
let val = unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) };
if val.is_null() {
debug!("get_declared_value: {:?} value is null", name);
None
} else {
debug!("get_declared_value: {:?} => {:?}", name, Value(val));
Some(val)
}
unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) }
}
/// Get defined or externally defined (AvailableExternally linkage) value by
/// name.
pub fn get_defined_value(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
get_declared_value(cx, name).and_then(|val|{
let declaration = unsafe {
llvm::LLVMIsDeclaration(val) != 0

View file

@ -16,37 +16,36 @@ use std;
use builder::Builder;
use common::*;
use llvm::{ValueRef};
use llvm;
use meth;
use rustc::ty::layout::LayoutOf;
use rustc::ty::{self, Ty};
use value::Value;
pub fn size_and_align_of_dst(bx: &Builder<'a, 'll, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Option<&'ll Value>)
-> (&'ll Value, &'ll Value) {
debug!("calculate size of DST: {}; with lost info: {:?}",
t, Value(info));
t, info);
if bx.cx.type_is_sized(t) {
let (size, align) = bx.cx.size_and_align_of(t);
debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
t, Value(info), size, align);
t, info, size, align);
let size = C_usize(bx.cx, size.bytes());
let align = C_usize(bx.cx, align.abi());
return (size, align);
}
assert!(!info.is_null());
match t.sty {
ty::TyDynamic(..) => {
// load size/align from vtable
(meth::SIZE.get_usize(bx, info), meth::ALIGN.get_usize(bx, info))
let vtable = info.unwrap();
(meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
}
ty::TySlice(_) | ty::TyStr => {
let unit = t.sequence_element_type(bx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let (size, align) = bx.cx.size_and_align_of(unit);
(bx.mul(info, C_usize(bx.cx, size.bytes())),
(bx.mul(info.unwrap(), C_usize(bx.cx, size.bytes())),
C_usize(bx.cx, align.abi()))
}
_ => {

View file

@ -11,8 +11,7 @@
#![allow(non_upper_case_globals)]
use intrinsics::{self, Intrinsic};
use llvm;
use llvm::{TypeKind, ValueRef};
use llvm::{self, TypeKind};
use abi::{Abi, FnType, LlvmType, PassMode};
use mir::place::PlaceRef;
use mir::operand::{OperandRef, OperandValue};
@ -28,6 +27,7 @@ use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::Builder;
use value::Value;
use rustc::session::Session;
use syntax_pos::Span;
@ -35,7 +35,7 @@ use syntax_pos::Span;
use std::cmp::Ordering;
use std::iter;
fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32",
"sqrtf64" => "llvm.sqrt.f64",
@ -89,8 +89,8 @@ pub fn codegen_intrinsic_call(
bx: &Builder<'a, 'll, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx>],
llresult: ValueRef,
args: &[OperandRef<'ll, 'tcx>],
llresult: &'ll Value,
span: Span,
) {
let cx = bx.cx;
@ -148,7 +148,7 @@ pub fn codegen_intrinsic_call(
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) =
glue::size_and_align_of_dst(bx, tp_ty, meta);
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llsize
} else {
C_usize(cx, cx.size_of(tp_ty).bytes())
@ -162,7 +162,7 @@ pub fn codegen_intrinsic_call(
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) =
glue::size_and_align_of_dst(bx, tp_ty, meta);
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llalign
} else {
C_usize(cx, cx.align_of(tp_ty).abi())
@ -592,9 +592,8 @@ pub fn codegen_intrinsic_call(
fn modify_as_needed(
bx: &Builder<'a, 'll, 'tcx>,
t: &intrinsics::Type,
arg: &OperandRef<'tcx>,
) -> Vec<ValueRef>
{
arg: &OperandRef<'ll, 'tcx>,
) -> Vec<&'ll Value> {
match *t {
intrinsics::Type::Aggregate(true, ref contents) => {
// We found a tuple that needs squishing! So
@ -685,10 +684,10 @@ fn copy_intrinsic(
allow_overlap: bool,
volatile: bool,
ty: Ty<'tcx>,
dst: ValueRef,
src: ValueRef,
count: ValueRef,
) -> ValueRef {
dst: &'ll Value,
src: &'ll Value,
count: &'ll Value,
) -> &'ll Value {
let cx = bx.cx;
let (size, align) = cx.size_and_align_of(ty);
let size = C_usize(cx, size.bytes());
@ -720,10 +719,10 @@ fn memset_intrinsic(
bx: &Builder<'a, 'll, 'tcx>,
volatile: bool,
ty: Ty<'tcx>,
dst: ValueRef,
val: ValueRef,
count: ValueRef
) -> ValueRef {
dst: &'ll Value,
val: &'ll Value,
count: &'ll Value
) -> &'ll Value {
let cx = bx.cx;
let (size, align) = cx.size_and_align_of(ty);
let size = C_usize(cx, size.bytes());
@ -734,11 +733,11 @@ fn memset_intrinsic(
fn try_intrinsic(
bx: &Builder<'a, 'll, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
cx: &CodegenCx<'ll, 'tcx>,
func: &'ll Value,
data: &'ll Value,
local_ptr: &'ll Value,
dest: &'ll Value,
) {
if bx.sess().no_landing_pads() {
bx.call(func, &[data], None);
@ -760,11 +759,11 @@ fn try_intrinsic(
// as the old ones are still more optimized.
fn codegen_msvc_try(
bx: &Builder<'a, 'll, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
cx: &CodegenCx<'ll, 'tcx>,
func: &'ll Value,
data: &'ll Value,
local_ptr: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
let cx = bx.cx;
@ -870,11 +869,11 @@ fn codegen_msvc_try(
// the right personality function.
fn codegen_gnu_try(
bx: &Builder<'a, 'll, 'tcx>,
cx: &CodegenCx,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
cx: &CodegenCx<'ll, 'tcx>,
func: &'ll Value,
data: &'ll Value,
local_ptr: &'ll Value,
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
let cx = bx.cx;
@ -936,7 +935,7 @@ fn gen_fn<'ll, 'tcx>(
inputs: Vec<Ty<'tcx>>,
output: Ty<'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> ValueRef {
) -> &'ll Value {
let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig(
inputs.into_iter(),
output,
@ -957,7 +956,7 @@ fn gen_fn<'ll, 'tcx>(
fn get_rust_try_fn<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> ValueRef {
) -> &'ll Value {
if let Some(llfn) = cx.rust_try_fn.get() {
return llfn;
}
@ -986,11 +985,11 @@ fn generic_simd_intrinsic(
bx: &Builder<'a, 'll, 'tcx>,
name: &str,
callee_ty: Ty<'tcx>,
args: &[OperandRef<'tcx>],
args: &[OperandRef<'ll, 'tcx>],
ret_ty: Ty<'tcx>,
llret_ty: &'ll Type,
span: Span
) -> Result<ValueRef, ()> {
) -> Result<&'ll Value, ()> {
// macros for error handling:
macro_rules! emit_error {
($msg: tt) => {
@ -1167,8 +1166,8 @@ fn generic_simd_intrinsic(
in_len: usize,
bx: &Builder<'a, 'll, 'tcx>,
span: Span,
args: &[OperandRef<'tcx>],
) -> Result<ValueRef, ()> {
args: &[OperandRef<'ll, 'tcx>],
) -> Result<&'ll Value, ()> {
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )

View file

@ -14,8 +14,9 @@ pub use self::OptimizationDiagnosticKind::*;
pub use self::Diagnostic::*;
use libc::c_uint;
use value::Value;
use super::{DiagnosticInfoRef, TwineRef, ValueRef};
use super::{DiagnosticInfoRef, TwineRef};
#[derive(Copy, Clone)]
pub enum OptimizationDiagnosticKind {
@ -41,21 +42,22 @@ impl OptimizationDiagnosticKind {
}
}
pub struct OptimizationDiagnostic {
pub struct OptimizationDiagnostic<'ll> {
pub kind: OptimizationDiagnosticKind,
pub pass_name: String,
pub function: ValueRef,
pub function: &'ll Value,
pub line: c_uint,
pub column: c_uint,
pub filename: String,
pub message: String,
}
impl OptimizationDiagnostic {
unsafe fn unpack(kind: OptimizationDiagnosticKind,
di: DiagnosticInfoRef)
-> OptimizationDiagnostic {
let mut function = 0 as *mut _;
impl OptimizationDiagnostic<'ll> {
unsafe fn unpack(
kind: OptimizationDiagnosticKind,
di: DiagnosticInfoRef,
) -> Self {
let mut function = None;
let mut line = 0;
let mut column = 0;
@ -83,7 +85,7 @@ impl OptimizationDiagnostic {
OptimizationDiagnostic {
kind,
pass_name: pass_name.expect("got a non-UTF8 pass name from LLVM"),
function,
function: function.unwrap(),
line,
column,
filename,
@ -93,41 +95,44 @@ impl OptimizationDiagnostic {
}
#[derive(Copy, Clone)]
pub struct InlineAsmDiagnostic {
pub struct InlineAsmDiagnostic<'ll> {
pub cookie: c_uint,
pub message: TwineRef,
pub instruction: ValueRef,
pub instruction: &'ll Value,
}
impl InlineAsmDiagnostic {
unsafe fn unpack(di: DiagnosticInfoRef) -> InlineAsmDiagnostic {
impl InlineAsmDiagnostic<'ll> {
unsafe fn unpack(di: DiagnosticInfoRef) -> Self {
let mut cookie = 0;
let mut message = 0 as *mut _;
let mut instruction = None;
let mut opt = InlineAsmDiagnostic {
cookie: 0,
message: 0 as *mut _,
instruction: 0 as *mut _,
};
super::LLVMRustUnpackInlineAsmDiagnostic(
di,
&mut cookie,
&mut message,
&mut instruction,
);
super::LLVMRustUnpackInlineAsmDiagnostic(di,
&mut opt.cookie,
&mut opt.message,
&mut opt.instruction);
opt
InlineAsmDiagnostic {
cookie,
message,
instruction: instruction.unwrap(),
}
}
}
pub enum Diagnostic {
Optimization(OptimizationDiagnostic),
InlineAsm(InlineAsmDiagnostic),
pub enum Diagnostic<'ll> {
Optimization(OptimizationDiagnostic<'ll>),
InlineAsm(InlineAsmDiagnostic<'ll>),
PGO(DiagnosticInfoRef),
/// LLVM has other types that we do not wrap here.
UnknownDiagnostic(DiagnosticInfoRef),
}
impl Diagnostic {
pub unsafe fn unpack(di: DiagnosticInfoRef) -> Diagnostic {
impl Diagnostic<'ll> {
pub unsafe fn unpack(di: DiagnosticInfoRef) -> Self {
use super::DiagnosticKind as Dk;
let kind = super::LLVMRustGetDiagInfoKind(di);

File diff suppressed because it is too large Load diff

View file

@ -42,7 +42,7 @@ impl LLVMRustResult {
}
}
pub fn AddFunctionAttrStringValue(llfn: ValueRef,
pub fn AddFunctionAttrStringValue(llfn: &'a Value,
idx: AttributePlace,
attr: &CStr,
value: &CStr) {
@ -108,12 +108,12 @@ pub unsafe extern "C" fn LLVMRustStringWriteImpl(sr: RustStringRef,
(*sr).borrow_mut().extend_from_slice(slice);
}
pub fn SetInstructionCallConv(instr: ValueRef, cc: CallConv) {
pub fn SetInstructionCallConv(instr: &'a Value, cc: CallConv) {
unsafe {
LLVMSetInstructionCallConv(instr, cc as c_uint);
}
}
pub fn SetFunctionCallConv(fn_: ValueRef, cc: CallConv) {
pub fn SetFunctionCallConv(fn_: &'a Value, cc: CallConv) {
unsafe {
LLVMSetFunctionCallConv(fn_, cc as c_uint);
}
@ -125,49 +125,49 @@ pub fn SetFunctionCallConv(fn_: ValueRef, cc: CallConv) {
// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
// function.
// For more details on COMDAT sections see e.g. http://www.airs.com/blog/archives/52
pub fn SetUniqueComdat(llmod: &Module, val: ValueRef) {
pub fn SetUniqueComdat(llmod: &Module, val: &'a Value) {
unsafe {
LLVMRustSetComdat(llmod, val, LLVMGetValueName(val));
}
}
pub fn UnsetComdat(val: ValueRef) {
pub fn UnsetComdat(val: &'a Value) {
unsafe {
LLVMRustUnsetComdat(val);
}
}
pub fn SetUnnamedAddr(global: ValueRef, unnamed: bool) {
pub fn SetUnnamedAddr(global: &'a Value, unnamed: bool) {
unsafe {
LLVMSetUnnamedAddr(global, unnamed as Bool);
}
}
pub fn set_thread_local(global: ValueRef, is_thread_local: bool) {
pub fn set_thread_local(global: &'a Value, is_thread_local: bool) {
unsafe {
LLVMSetThreadLocal(global, is_thread_local as Bool);
}
}
pub fn set_thread_local_mode(global: ValueRef, mode: ThreadLocalMode) {
pub fn set_thread_local_mode(global: &'a Value, mode: ThreadLocalMode) {
unsafe {
LLVMSetThreadLocalMode(global, mode);
}
}
impl Attribute {
pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) }
}
pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) }
}
pub fn unapply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) {
unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) }
}
pub fn toggle_llfn(&self, idx: AttributePlace, llfn: ValueRef, set: bool) {
pub fn toggle_llfn(&self, idx: AttributePlace, llfn: &Value, set: bool) {
if set {
self.apply_llfn(idx, llfn);
} else {
@ -226,7 +226,7 @@ pub fn mk_section_iter(llof: ObjectFileRef) -> SectionIter {
}
/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
pub fn get_param(llfn: ValueRef, index: c_uint) -> ValueRef {
pub fn get_param(llfn: &'a Value, index: c_uint) -> &'a Value {
unsafe {
assert!(index < LLVMCountParams(llfn),
"out of bounds argument access: {} out of {} arguments", index, LLVMCountParams(llfn));
@ -265,7 +265,7 @@ pub struct OperandBundleDef {
}
impl OperandBundleDef {
pub fn new(name: &str, vals: &[ValueRef]) -> OperandBundleDef {
pub fn new(name: &str, vals: &[&'a Value]) -> OperandBundleDef {
let name = CString::new(name).unwrap();
let def = unsafe {
LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)

View file

@ -8,7 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use abi::{FnType, FnTypeExt};
use callee;
use common::*;
@ -17,6 +16,7 @@ use consts;
use monomorphize;
use type_::Type;
use value::Value;
use rustc::ty::{self, Ty};
use rustc::ty::layout::HasDataLayout;
use debuginfo;
@ -34,10 +34,10 @@ impl<'a, 'tcx> VirtualIndex {
}
pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>,
llvtable: ValueRef,
fn_ty: &FnType<'tcx, Ty<'tcx>>) -> ValueRef {
llvtable: &'ll Value,
fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", Value(llvtable), self);
debug!("get_fn({:?}, {:?})", llvtable, self);
let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to());
let ptr_align = bx.tcx().data_layout.pointer_align;
@ -48,9 +48,9 @@ impl<'a, 'tcx> VirtualIndex {
ptr
}
pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: ValueRef) -> ValueRef {
pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: &'ll Value) -> &'ll Value {
// Load the data pointer from the object.
debug!("get_int({:?}, {:?})", Value(llvtable), self);
debug!("get_int({:?}, {:?})", llvtable, self);
let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to());
let usize_align = bx.tcx().data_layout.pointer_align;
@ -69,11 +69,11 @@ impl<'a, 'tcx> VirtualIndex {
/// The `trait_ref` encodes the erased self type. Hence if we are
/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
/// `trait_ref` would map `T:Trait`.
pub fn get_vtable<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ty: Ty<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>)
-> ValueRef
{
pub fn get_vtable(
cx: &CodegenCx<'ll, 'tcx>,
ty: Ty<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> &'ll Value {
let tcx = cx.tcx;
debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);

View file

@ -34,7 +34,7 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitVector<mir::Local> {
let layout = fx.cx.layout_of(ty);
if layout.is_llvm_immediate() {
// These sorts of types are immediates that we can store
// in an ValueRef without an alloca.
// in a Value without an alloca.
} else if layout.is_llvm_scalar_pair() {
// We allow pairs and uses of any of their 2 fields.
} else {

View file

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef, BasicBlockRef};
use llvm::{self, BasicBlockRef};
use rustc::middle::lang_items;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutOf};
@ -24,6 +24,7 @@ use meth;
use monomorphize;
use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
use syntax::symbol::Symbol;
use syntax_pos::Pos;
@ -97,7 +98,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
}
};
let funclet_br = |this: &mut Self, bx: Builder, target: mir::BasicBlock| {
let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_>, target: mir::BasicBlock| {
let (lltarget, is_cleanupret) = lltarget(this, target);
if is_cleanupret {
// micro-optimization: generate a `ret` rather than a jump
@ -112,9 +113,9 @@ impl FunctionCx<'a, 'll, 'tcx> {
this: &mut Self,
bx: Builder<'a, 'll, 'tcx>,
fn_ty: FnType<'tcx, Ty<'tcx>>,
fn_ptr: ValueRef,
llargs: &[ValueRef],
destination: Option<(ReturnDest<'tcx>, mir::BasicBlock)>,
fn_ptr: &'ll Value,
llargs: &[&'ll Value],
destination: Option<(ReturnDest<'ll, 'tcx>, mir::BasicBlock)>,
cleanup: Option<mir::BasicBlock>
| {
if let Some(cleanup) = cleanup {
@ -285,8 +286,14 @@ impl FunctionCx<'a, 'll, 'tcx> {
}
let place = self.codegen_place(&bx, location);
let mut args: &[_] = &[place.llval, place.llextra];
args = &args[..1 + place.has_extra() as usize];
let (args1, args2);
let mut args = if let Some(llextra) = place.llextra {
args2 = [place.llval, llextra];
&args2[..]
} else {
args1 = [place.llval];
&args1[..]
};
let (drop_fn, fn_ty) = match ty.sty {
ty::TyDynamic(..) => {
let fn_ty = drop_fn.ty(bx.cx.tcx);
@ -296,8 +303,9 @@ impl FunctionCx<'a, 'll, 'tcx> {
&sig,
);
let fn_ty = FnType::new_vtable(bx.cx, sig, &[]);
let vtable = args[1];
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&bx, place.llextra, &fn_ty), fn_ty)
(meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
}
_ => {
(callee::get_fn(bx.cx, drop_fn),
@ -628,8 +636,8 @@ impl FunctionCx<'a, 'll, 'tcx> {
fn codegen_argument(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
op: OperandRef<'tcx>,
llargs: &mut Vec<ValueRef>,
op: OperandRef<'ll, 'tcx>,
llargs: &mut Vec<&'ll Value>,
arg: &ArgType<'tcx, Ty<'tcx>>) {
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
@ -708,7 +716,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
fn codegen_arguments_untupled(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
llargs: &mut Vec<&'ll Value>,
args: &[ArgType<'tcx, Ty<'tcx>>]) {
let tuple = self.codegen_operand(bx, operand);
@ -728,7 +736,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
}
}
fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'tcx> {
fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'ll, 'tcx> {
let cx = bx.cx;
if let Some(slot) = self.personality_slot {
slot
@ -803,8 +811,8 @@ impl FunctionCx<'a, 'll, 'tcx> {
fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>,
dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool)
-> ReturnDest<'tcx> {
llargs: &mut Vec<&'ll Value>, is_intrinsic: bool)
-> ReturnDest<'ll, 'tcx> {
// If the return is ignored, we can just return a do-nothing ReturnDest
if fn_ret.is_ignore() {
return ReturnDest::Nothing;
@ -886,7 +894,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>,
src: &mir::Operand<'tcx>,
dst: PlaceRef<'tcx>) {
dst: PlaceRef<'ll, 'tcx>) {
let src = self.codegen_operand(bx, src);
let llty = src.layout.llvm_type(bx.cx);
let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to());
@ -898,9 +906,9 @@ impl FunctionCx<'a, 'll, 'tcx> {
// Stores the return value of a function call into it's final location.
fn store_return(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
dest: ReturnDest<'tcx>,
dest: ReturnDest<'ll, 'tcx>,
ret_ty: &ArgType<'tcx, Ty<'tcx>>,
llval: ValueRef) {
llval: &'ll Value) {
use self::ReturnDest::*;
match dest {
@ -929,13 +937,13 @@ impl FunctionCx<'a, 'll, 'tcx> {
}
}
enum ReturnDest<'tcx> {
enum ReturnDest<'ll, 'tcx> {
// Do nothing, the return value is indirect or ignored
Nothing,
// Store the return value to the pointer
Store(PlaceRef<'tcx>),
Store(PlaceRef<'ll, 'tcx>),
// Stores an indirect return value to an operand local place
IndirectOperand(PlaceRef<'tcx>, mir::Local),
IndirectOperand(PlaceRef<'ll, 'tcx>, mir::Local),
// Stores a direct return value to an operand local place
DirectOperand(mir::Local)
}

View file

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef};
use llvm;
use rustc::mir::interpret::ConstEvalErr;
use rustc_mir::interpret::{read_target_uint, const_val_field};
use rustc::hir::def_id::DefId;
@ -26,14 +26,17 @@ use type_of::LayoutLlvmExt;
use type_::Type;
use syntax::ast::Mutability;
use syntax::codemap::Span;
use value::Value;
use super::super::callee;
use super::FunctionCx;
pub fn scalar_to_llvm(cx: &CodegenCx,
cv: Scalar,
layout: &layout::Scalar,
llty: &Type) -> ValueRef {
pub fn scalar_to_llvm(
cx: &CodegenCx<'ll, '_>,
cv: Scalar,
layout: &layout::Scalar,
llty: &'ll Type,
) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() };
match cv {
Scalar::Bits { defined, .. } if (defined as u64) < bitsize || defined == 0 => {
@ -81,7 +84,7 @@ pub fn scalar_to_llvm(cx: &CodegenCx,
}
}
pub fn const_alloc_to_llvm(cx: &CodegenCx, alloc: &Allocation) -> ValueRef {
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
let layout = cx.data_layout();
let pointer_size = layout.pointer_size.bytes() as usize;
@ -116,10 +119,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx, alloc: &Allocation) -> ValueRef {
C_struct(cx, &llvals, true)
}
pub fn codegen_static_initializer<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
pub fn codegen_static_initializer(
cx: &CodegenCx<'ll, 'tcx>,
def_id: DefId,
) -> Result<(ValueRef, &'tcx Allocation), Lrc<ConstEvalErr<'tcx>>> {
) -> Result<(&'ll Value, &'tcx Allocation), Lrc<ConstEvalErr<'tcx>>> {
let instance = ty::Instance::mono(cx.tcx, def_id);
let cid = GlobalId {
instance,
@ -172,7 +175,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
span: Span,
ty: Ty<'tcx>,
constant: Result<&'tcx ty::Const<'tcx>, Lrc<ConstEvalErr<'tcx>>>,
) -> (ValueRef, Ty<'tcx>) {
) -> (&'ll Value, Ty<'tcx>) {
constant
.and_then(|c| {
let field_ty = c.ty.builtin_index().unwrap();
@ -180,7 +183,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
ty::TyArray(_, n) => n.unwrap_usize(bx.tcx()),
ref other => bug!("invalid simd shuffle type: {}", other),
};
let values: Result<Vec<ValueRef>, Lrc<_>> = (0..fields).map(|field| {
let values: Result<Vec<_>, Lrc<_>> = (0..fields).map(|field| {
let field = const_val_field(
bx.tcx(),
ty::ParamEnv::reveal_all(),

View file

@ -10,7 +10,7 @@
use common::{C_i32, C_null};
use libc::c_uint;
use llvm::{self, ValueRef, BasicBlockRef};
use llvm::{self, BasicBlockRef};
use llvm::debuginfo::DIScope;
use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
use rustc::ty::layout::{LayoutOf, TyLayout};
@ -24,6 +24,7 @@ use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebug
use monomorphize::Instance;
use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode};
use type_::Type;
use value::Value;
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords;
@ -49,7 +50,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> {
debug_context: FunctionDebugContext<'ll>,
llfn: ValueRef,
llfn: &'ll Value,
cx: &'a CodegenCx<'ll, 'tcx>,
@ -62,7 +63,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> {
/// don't really care about it very much. Anyway, this value
/// contains an alloca into which the personality is stored and
/// then later loaded when generating the DIVERGE_BLOCK.
personality_slot: Option<PlaceRef<'tcx>>,
personality_slot: Option<PlaceRef<'ll, 'tcx>>,
/// A `Block` for each MIR `BasicBlock`
blocks: IndexVec<mir::BasicBlock, BasicBlockRef>,
@ -72,7 +73,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> {
/// When targeting MSVC, this stores the cleanup info for each funclet
/// BB. This is initialized as we compute the funclets' head block in RPO.
funclets: &'a IndexVec<mir::BasicBlock, Option<Funclet>>,
funclets: &'a IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>,
/// This stores the landing-pad block for a given BB, computed lazily on GNU
/// and eagerly on MSVC.
@ -96,7 +97,7 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> {
///
/// Avoiding allocs can also be important for certain intrinsics,
/// notably `expect`.
locals: IndexVec<mir::Local, LocalRef<'tcx>>,
locals: IndexVec<mir::Local, LocalRef<'ll, 'tcx>>,
/// Debug information for MIR scopes.
scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,
@ -177,13 +178,13 @@ impl FunctionCx<'a, 'll, 'tcx> {
}
}
enum LocalRef<'tcx> {
Place(PlaceRef<'tcx>),
Operand(Option<OperandRef<'tcx>>),
enum LocalRef<'ll, 'tcx> {
Place(PlaceRef<'ll, 'tcx>),
Operand(Option<OperandRef<'ll, 'tcx>>),
}
impl<'a, 'tcx> LocalRef<'tcx> {
fn new_operand(cx: &CodegenCx<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> {
impl LocalRef<'ll, 'tcx> {
fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'ll, 'tcx> {
if layout.is_zst() {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
@ -199,7 +200,7 @@ impl<'a, 'tcx> LocalRef<'tcx> {
pub fn codegen_mir(
cx: &'a CodegenCx<'ll, 'tcx>,
llfn: ValueRef,
llfn: &'ll Value,
mir: &'a Mir<'tcx>,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
@ -349,7 +350,7 @@ fn create_funclets(
cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
block_bxs: &IndexVec<mir::BasicBlock, BasicBlockRef>)
-> (IndexVec<mir::BasicBlock, Option<BasicBlockRef>>,
IndexVec<mir::BasicBlock, Option<Funclet>>)
IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>)
{
block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
match *cleanup_kind {
@ -409,7 +410,7 @@ fn create_funclets(
}).unzip()
}
/// Produce, for each argument, a `ValueRef` pointing at the
/// Produce, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs(
@ -417,7 +418,7 @@ fn arg_local_refs(
fx: &FunctionCx<'a, 'll, 'tcx>,
scopes: &IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,
memory_locals: &BitVector<mir::Local>,
) -> Vec<LocalRef<'tcx>> {
) -> Vec<LocalRef<'ll, 'tcx>> {
let mir = fx.mir;
let tcx = bx.tcx();
let mut idx = 0;

View file

@ -8,7 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use rustc::mir::interpret::ConstEvalErr;
use rustc::mir;
use rustc::mir::interpret::ConstValue;
@ -32,31 +31,15 @@ use super::place::PlaceRef;
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
/// safety check.
#[derive(Copy, Clone)]
pub enum OperandValue {
#[derive(Copy, Clone, Debug)]
pub enum OperandValue<'ll> {
/// A reference to the actual operand. The data is guaranteed
/// to be valid for the operand's lifetime.
Ref(ValueRef, Align),
Ref(&'ll Value, Align),
/// A single LLVM value.
Immediate(ValueRef),
Immediate(&'ll Value),
/// A pair of immediate LLVM values. Used by fat pointers too.
Pair(ValueRef, ValueRef)
}
impl fmt::Debug for OperandValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
OperandValue::Ref(r, align) => {
write!(f, "Ref({:?}, {:?})", Value(r), align)
}
OperandValue::Immediate(i) => {
write!(f, "Immediate({:?})", Value(i))
}
OperandValue::Pair(a, b) => {
write!(f, "Pair({:?}, {:?})", Value(a), Value(b))
}
}
}
Pair(&'ll Value, &'ll Value)
}
/// An `OperandRef` is an "SSA" reference to a Rust value, along with
@ -68,23 +51,23 @@ impl fmt::Debug for OperandValue {
/// directly is sure to cause problems -- use `OperandRef::store`
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx> {
pub struct OperandRef<'ll, 'tcx> {
// The value.
pub val: OperandValue,
pub val: OperandValue<'ll>,
// The layout of value, based on its Rust type.
pub layout: TyLayout<'tcx>,
}
impl<'tcx> fmt::Debug for OperandRef<'tcx> {
impl fmt::Debug for OperandRef<'ll, 'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
}
}
impl<'a, 'tcx> OperandRef<'tcx> {
pub fn new_zst(cx: &CodegenCx<'a, 'tcx>,
layout: TyLayout<'tcx>) -> OperandRef<'tcx> {
impl OperandRef<'ll, 'tcx> {
pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>,
layout: TyLayout<'tcx>) -> OperandRef<'ll, 'tcx> {
assert!(layout.is_zst());
OperandRef {
val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))),
@ -94,7 +77,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
val: &'tcx ty::Const<'tcx>)
-> Result<OperandRef<'tcx>, Lrc<ConstEvalErr<'tcx>>> {
-> Result<OperandRef<'ll, 'tcx>, Lrc<ConstEvalErr<'tcx>>> {
let layout = bx.cx.layout_of(val.ty);
if layout.is_zst() {
@ -148,19 +131,19 @@ impl<'a, 'tcx> OperandRef<'tcx> {
/// Asserts that this operand refers to a scalar and returns
/// a reference to its value.
pub fn immediate(self) -> ValueRef {
pub fn immediate(self) -> &'ll Value {
match self.val {
OperandValue::Immediate(s) => s,
_ => bug!("not immediate: {:?}", self)
}
}
pub fn deref(self, cx: &CodegenCx<'a, 'tcx>) -> PlaceRef<'tcx> {
pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> {
let projected_ty = self.layout.ty.builtin_deref(true)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
let (llptr, llextra) = match self.val {
OperandValue::Immediate(llptr) => (llptr, 0 as *mut _),
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
OperandValue::Immediate(llptr) => (llptr, None),
OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self)
};
let layout = cx.layout_of(projected_ty);
@ -174,7 +157,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
/// If this operand is a `Pair`, we return an aggregate with the two values.
/// For other cases, see `immediate`.
pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> ValueRef {
pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value {
if let OperandValue::Pair(a, b) = self.val {
let llty = self.layout.llvm_type(bx.cx);
debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
@ -191,9 +174,9 @@ impl<'a, 'tcx> OperandRef<'tcx> {
/// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>,
llval: ValueRef,
llval: &'ll Value,
layout: TyLayout<'tcx>)
-> OperandRef<'tcx> {
-> OperandRef<'ll, 'tcx> {
let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
llval, layout);
@ -208,7 +191,7 @@ impl<'a, 'tcx> OperandRef<'tcx> {
OperandRef { val, layout }
}
pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'tcx> {
pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'ll, 'tcx> {
let field = self.layout.field(bx.cx, i);
let offset = self.layout.fields.offset(i);
@ -266,24 +249,24 @@ impl<'a, 'tcx> OperandRef<'tcx> {
}
}
impl<'a, 'tcx> OperandValue {
pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx>) {
impl OperandValue<'ll> {
pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) {
self.store_with_flags(bx, dest, MemFlags::empty());
}
pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx>) {
pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE);
}
pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx>) {
pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
}
pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx>) {
pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) {
self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
}
fn store_with_flags(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx>, flags: MemFlags) {
fn store_with_flags(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>, flags: MemFlags) {
debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless.
@ -314,7 +297,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
fn maybe_codegen_consume_direct(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> Option<OperandRef<'tcx>>
-> Option<OperandRef<'ll, 'tcx>>
{
debug!("maybe_codegen_consume_direct(place={:?})", place);
@ -362,7 +345,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
pub fn codegen_consume(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> OperandRef<'tcx>
-> OperandRef<'ll, 'tcx>
{
debug!("codegen_consume(place={:?})", place);
@ -386,7 +369,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
pub fn codegen_operand(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx>
-> OperandRef<'ll, 'tcx>
{
debug!("codegen_operand(operand={:?})", operand);

View file

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef, LLVMConstInBoundsGEP};
use llvm::{self, LLVMConstInBoundsGEP};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size};
use rustc::mir;
@ -28,12 +28,12 @@ use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx> {
pub struct PlaceRef<'ll, 'tcx> {
/// Pointer to the contents of the place
pub llval: ValueRef,
pub llval: &'ll Value,
/// This place's extra data if it is unsized, or null
pub llextra: ValueRef,
pub llextra: Option<&'ll Value>,
/// Monomorphized type of this place, including variant information
pub layout: TyLayout<'tcx>,
@ -42,14 +42,15 @@ pub struct PlaceRef<'tcx> {
pub align: Align,
}
impl<'a, 'tcx> PlaceRef<'tcx> {
pub fn new_sized(llval: ValueRef,
layout: TyLayout<'tcx>,
align: Align)
-> PlaceRef<'tcx> {
impl PlaceRef<'ll, 'tcx> {
pub fn new_sized(
llval: &'ll Value,
layout: TyLayout<'tcx>,
align: Align,
) -> PlaceRef<'ll, 'tcx> {
PlaceRef {
llval,
llextra: 0 as *mut _,
llextra: None,
layout,
align
}
@ -60,7 +61,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
layout: TyLayout<'tcx>,
alloc: &mir::interpret::Allocation,
offset: Size,
) -> PlaceRef<'tcx> {
) -> PlaceRef<'ll, 'tcx> {
let init = const_alloc_to_llvm(bx.cx, alloc);
let base_addr = consts::addr_of(bx.cx, init, layout.align, "byte_str");
@ -74,18 +75,17 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
-> PlaceRef<'tcx> {
-> PlaceRef<'ll, 'tcx> {
debug!("alloca({:?}: {:?})", name, layout);
let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align);
Self::new_sized(tmp, layout, layout.align)
}
pub fn len(&self, cx: &CodegenCx<'a, 'tcx>) -> ValueRef {
pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
if self.layout.is_unsized() {
assert!(self.has_extra());
assert_eq!(count, 0);
self.llextra
self.llextra.unwrap()
} else {
C_usize(cx, count)
}
@ -94,14 +94,10 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
}
pub fn has_extra(&self) -> bool {
!self.llextra.is_null()
}
pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx> {
pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> {
debug!("PlaceRef::load: {:?}", self);
assert!(!self.has_extra());
assert_eq!(self.llextra, None);
if self.layout.is_zst() {
return OperandRef::new_zst(bx.cx, self.layout);
@ -124,23 +120,21 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
};
let val = if self.layout.is_llvm_immediate() {
let mut const_llval = 0 as *mut _;
let mut const_llval = None;
unsafe {
let global = llvm::LLVMIsAGlobalVariable(self.llval);
if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
const_llval = llvm::LLVMGetInitializer(global);
if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
if llvm::LLVMIsGlobalConstant(global) == llvm::True {
const_llval = llvm::LLVMGetInitializer(global);
}
}
}
let llval = if !const_llval.is_null() {
const_llval
} else {
let llval = const_llval.unwrap_or_else(|| {
let load = bx.load(self.llval, self.align);
if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
scalar_load_metadata(load, scalar);
}
load
};
});
OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
} else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
let load = |i, scalar: &layout::Scalar| {
@ -162,7 +156,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
/// Access a field, at a point when the value's case is known.
pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'tcx> {
pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'ll, 'tcx> {
let cx = bx.cx;
let field = self.layout.field(cx, ix);
let offset = self.layout.fields.offset(ix);
@ -185,7 +179,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
llextra: if cx.type_has_metadata(field.ty) {
self.llextra
} else {
0 as *mut _
None
},
layout: field,
align,
@ -197,9 +191,9 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
// * known alignment - sized types, [T], str or a foreign type
// * packed struct - there is no alignment padding
match field.ty.sty {
_ if !self.has_extra() => {
_ if self.llextra.is_none() => {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(self.llval));
ix, self.llval);
return simple();
}
_ if !field.is_unsized() => return simple(),
@ -247,7 +241,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
bx.neg(unsized_align));
debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
debug!("struct_field_ptr: DST field offset: {:?}", offset);
// Cast and adjust pointer
let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx));
@ -266,7 +260,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
/// Obtain the actual discriminant of a value.
pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> &'ll Value {
let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
if self.layout.abi == layout::Abi::Uninhabited {
return C_undef(cast_to);
@ -384,18 +378,18 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
}
}
pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: ValueRef)
-> PlaceRef<'tcx> {
pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
-> PlaceRef<'ll, 'tcx> {
PlaceRef {
llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]),
llextra: 0 as *mut _,
llextra: None,
layout: self.layout.field(bx.cx, 0),
align: self.align
}
}
pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize)
-> PlaceRef<'tcx> {
-> PlaceRef<'ll, 'tcx> {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx, variant_index);
@ -419,7 +413,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
pub fn codegen_place(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> PlaceRef<'tcx> {
-> PlaceRef<'ll, 'tcx> {
debug!("codegen_place(place={:?})", place);
let cx = bx.cx;
@ -511,9 +505,8 @@ impl FunctionCx<'a, 'll, 'tcx> {
subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));
if subslice.layout.is_unsized() {
assert!(cg_base.has_extra());
subslice.llextra = bx.sub(cg_base.llextra,
C_usize(bx.cx, (from as u64) + (to as u64)));
subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
C_usize(bx.cx, (from as u64) + (to as u64))));
}
// Cast the place pointer type to the new

View file

@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef};
use llvm;
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::{self, LayoutOf};
@ -35,12 +35,12 @@ use super::place::PlaceRef;
impl FunctionCx<'a, 'll, 'tcx> {
pub fn codegen_rvalue(&mut self,
bx: Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx>,
dest: PlaceRef<'ll, 'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> Builder<'a, 'll, 'tcx>
{
debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
Value(dest.llval), rvalue);
dest.llval, rvalue);
match *rvalue {
mir::Rvalue::Use(ref operand) => {
@ -178,7 +178,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
pub fn codegen_rvalue_operand(&mut self,
bx: Builder<'a, 'll, 'tcx>,
rvalue: &mir::Rvalue<'tcx>)
-> (Builder<'a, 'll, 'tcx>, OperandRef<'tcx>)
-> (Builder<'a, 'll, 'tcx>, OperandRef<'ll, 'tcx>)
{
assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
@ -371,7 +371,7 @@ impl FunctionCx<'a, 'll, 'tcx> {
let val = if !bx.cx.type_has_metadata(ty) {
OperandValue::Immediate(cg_place.llval)
} else {
OperandValue::Pair(cg_place.llval, cg_place.llextra)
OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
};
(bx, OperandRef {
val,
@ -511,10 +511,11 @@ impl FunctionCx<'a, 'll, 'tcx> {
}
}
fn evaluate_array_len(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>) -> ValueRef
{
fn evaluate_array_len(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>,
) -> &'ll Value {
// ZST are passed as operands and require special handling
// because codegen_place() panics if Local is operand.
if let mir::Place::Local(index) = *place {
@ -530,12 +531,14 @@ impl FunctionCx<'a, 'll, 'tcx> {
return cg_value.len(bx.cx);
}
pub fn codegen_scalar_binop(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
input_ty: Ty<'tcx>) -> ValueRef {
pub fn codegen_scalar_binop(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
op: mir::BinOp,
lhs: &'ll Value,
rhs: &'ll Value,
input_ty: Ty<'tcx>,
) -> &'ll Value {
let is_float = input_ty.is_fp();
let is_signed = input_ty.is_signed();
let is_nil = input_ty.is_nil();
@ -596,15 +599,16 @@ impl FunctionCx<'a, 'll, 'tcx> {
}
}
pub fn codegen_fat_ptr_binop(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
op: mir::BinOp,
lhs_addr: ValueRef,
lhs_extra: ValueRef,
rhs_addr: ValueRef,
rhs_extra: ValueRef,
_input_ty: Ty<'tcx>)
-> ValueRef {
pub fn codegen_fat_ptr_binop(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
op: mir::BinOp,
lhs_addr: &'ll Value,
lhs_extra: &'ll Value,
rhs_addr: &'ll Value,
rhs_extra: &'ll Value,
_input_ty: Ty<'tcx>,
) -> &'ll Value {
match op {
mir::BinOp::Eq => {
bx.and(
@ -646,9 +650,9 @@ impl FunctionCx<'a, 'll, 'tcx> {
pub fn codegen_scalar_checked_binop(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
input_ty: Ty<'tcx>) -> OperandValue {
lhs: &'ll Value,
rhs: &'ll Value,
input_ty: Ty<'tcx>) -> OperandValue<'ll> {
// This case can currently arise only from functions marked
// with #[rustc_inherit_overflow_checks] and inlined from
// another crate (mostly core::num generic/#[inline] fns),
@ -721,7 +725,7 @@ enum OverflowOp {
Add, Sub, Mul
}
fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef {
fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use rustc::ty::{TyInt, TyUint};
@ -798,9 +802,9 @@ fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef {
fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
signed: bool,
x: ValueRef,
x: &'ll Value,
int_ty: &'ll Type,
float_ty: &'ll Type) -> ValueRef {
float_ty: &'ll Type) -> &'ll Value {
// Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
// It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
// LLVM's uitofp produces undef in those cases, so we manually check for that case.
@ -828,9 +832,9 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
signed: bool,
x: ValueRef,
x: &'ll Value,
float_ty: &'ll Type,
int_ty: &'ll Type) -> ValueRef {
int_ty: &'ll Type) -> &'ll Value {
let fptosui_result = if signed {
bx.fptosi(x, int_ty)
} else {

View file

@ -8,17 +8,32 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use llvm::Value;
use llvm;
use std::fmt;
use std::hash::{Hash, Hasher};
impl PartialEq for Value {
fn eq(&self, other: &Self) -> bool {
self as *const _ == other as *const _
}
}
impl Eq for Value {}
impl Hash for Value {
fn hash<H: Hasher>(&self, hasher: &mut H) {
(self as *const Self).hash(hasher);
}
}
#[derive(Copy, Clone, PartialEq)]
pub struct Value(pub llvm::ValueRef);
impl fmt::Debug for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&llvm::build_string(|s| unsafe {
llvm::LLVMRustWriteValueToString(self.0, s);
llvm::LLVMRustWriteValueToString(self, s);
}).expect("nun-UTF8 value description from LLVM"))
}
}