
Add an AtomicU64 type to std::sync::atomics

This also generalizes all atomic intrinsics over T, so we'll be able to add u8
atomics if we really feel the need to (do we really want to?).
Alex Crichton 2014-01-15 15:32:44 -08:00
parent cb40eba4b1
commit b00147a99b
6 changed files with 398 additions and 97 deletions
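For context, the operations this commit gives AtomicU64 (load, store, swap, compare_and_swap, fetch_add, fetch_sub) correspond to what std::sync::atomic::AtomicU64 offers today. Below is a minimal sketch in modern Rust, for illustration only; the 2014 std::sync::atomics API in the diff differs in details (different module path, &mut self on the mutating methods, and compare_and_swap returning the old value directly).

```rust
// Illustration in today's std::sync::atomic, not the 2014 std::sync::atomics
// API that this commit actually adds.
use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    let counter = AtomicU64::new(0);

    counter.store(5, Ordering::SeqCst);                      // atomic store
    assert_eq!(counter.load(Ordering::SeqCst), 5);            // atomic load
    assert_eq!(counter.swap(7, Ordering::SeqCst), 5);         // returns the old value
    assert_eq!(counter.fetch_add(3, Ordering::SeqCst), 7);    // old value; now 7 + 3 = 10
    assert_eq!(counter.fetch_sub(10, Ordering::SeqCst), 10);  // old value; now 10 - 10 = 0

    // compare_and_swap in the diff maps onto compare_exchange today; both
    // report the previous value (Ok on success, Err on failure here).
    assert_eq!(
        counter.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst),
        Ok(0)
    );
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}
```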


@@ -15,7 +15,7 @@ use lib::llvm::{Opcode, IntPredicate, RealPredicate, False};
use lib::llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
use middle::trans::base;
use middle::trans::common::*;
use middle::trans::machine::llalign_of_min;
use middle::trans::machine::llalign_of_pref;
use middle::trans::type_::Type;
use std::cast;
use std::hashmap::HashMap;
@@ -461,8 +461,10 @@ impl Builder {
pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef {
self.count_insn("load.atomic");
unsafe {
let align = llalign_of_min(self.ccx, self.ccx.int_type);
llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order, align as c_uint)
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
let align = llalign_of_pref(self.ccx, ty.element_type());
llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order,
align as c_uint)
}
}
@@ -514,8 +516,9 @@ impl Builder {
self.ccx.tn.val_to_str(val),
self.ccx.tn.val_to_str(ptr));
self.count_insn("store.atomic");
let align = llalign_of_min(self.ccx, self.ccx.int_type);
unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
let align = llalign_of_pref(self.ccx, ty.element_type());
llvm::LLVMBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint);
}
}


@@ -4030,29 +4030,32 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) {
//We only care about the operation here
match split[1] {
"cxchg" => (0, ~[ty::mk_mut_rptr(tcx,
"cxchg" => (1, ~[ty::mk_mut_rptr(tcx,
ty::ReLateBound(it.id, ty::BrAnon(0)),
ty::mk_int()),
ty::mk_int(),
ty::mk_int()
], ty::mk_int()),
"load" => (0,
param(ccx, 0)),
param(ccx, 0),
param(ccx, 0),
], param(ccx, 0)),
"load" => (1,
~[
ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), ty::mk_int())
ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)),
param(ccx, 0))
],
ty::mk_int()),
"store" => (0,
param(ccx, 0)),
"store" => (1,
~[
ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), ty::mk_int()),
ty::mk_int()
ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)),
param(ccx, 0)),
param(ccx, 0)
],
ty::mk_nil()),
"xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" |
"xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" |
"min" | "umax" | "umin" => {
(0, ~[ty::mk_mut_rptr(tcx,
(1, ~[ty::mk_mut_rptr(tcx,
ty::ReLateBound(it.id, ty::BrAnon(0)),
ty::mk_int()), ty::mk_int() ], ty::mk_int())
param(ccx, 0)), param(ccx, 0) ],
param(ccx, 0))
}
"fence" => {
(0, ~[], ty::mk_nil())
@@ -4085,16 +4088,6 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) {
}
"needs_drop" => (1u, ~[], ty::mk_bool()),
"owns_managed" => (1u, ~[], ty::mk_bool()),
"atomic_xchg" | "atomic_xadd" | "atomic_xsub" |
"atomic_xchg_acq" | "atomic_xadd_acq" | "atomic_xsub_acq" |
"atomic_xchg_rel" | "atomic_xadd_rel" | "atomic_xsub_rel" => {
(0,
~[
ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), ty::mk_int()),
ty::mk_int()
],
ty::mk_int())
}
"get_tydesc" => {
let tydesc_ty = match ty::get_tydesc_ty(ccx.tcx) {


@@ -59,9 +59,25 @@ pub struct AtomicUint {
priv nocopy: NonCopyable
}
/**
* An unsigned atomic integer type that is forced to be 64-bits. This does not
* support all operations.
*/
#[cfg(not(stage0))]
pub struct AtomicU64 {
priv v: u64,
priv nocopy: NonCopyable
}
/**
* An unsafe atomic pointer. Only supports basic atomic operations
*/
#[cfg(not(stage0))]
pub struct AtomicPtr<T> {
priv p: uint,
priv nocopy: NonCopyable
}
#[cfg(stage0)]
pub struct AtomicPtr<T> {
priv p: *mut T,
priv nocopy: NonCopyable
@@ -71,6 +87,12 @@ pub struct AtomicPtr<T> {
* An owned atomic pointer. Ensures that only a single reference to the data is held at any time.
*/
#[unsafe_no_drop_flag]
#[cfg(not(stage0))]
pub struct AtomicOption<T> {
priv p: uint,
}
#[unsafe_no_drop_flag]
#[cfg(stage0)]
pub struct AtomicOption<T> {
priv p: *mut u8
}
@@ -87,6 +109,8 @@ pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nocopy: NonCopyabl
pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nocopy: NonCopyable };
pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nocopy: NonCopyable };
pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nocopy: NonCopyable };
#[cfg(not(stage0))]
pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { v: 0, nocopy: NonCopyable };
impl AtomicFlag {
@@ -215,6 +239,43 @@ impl AtomicInt {
}
}
#[cfg(not(stage0))]
impl AtomicU64 {
pub fn new(v: u64) -> AtomicU64 {
AtomicU64 { v:v, nocopy: NonCopyable }
}
#[inline]
pub fn load(&self, order: Ordering) -> u64 {
unsafe { atomic_load(&self.v, order) }
}
#[inline]
pub fn store(&mut self, val: u64, order: Ordering) {
unsafe { atomic_store(&mut self.v, val, order); }
}
#[inline]
pub fn swap(&mut self, val: u64, order: Ordering) -> u64 {
unsafe { atomic_swap(&mut self.v, val, order) }
}
#[inline]
pub fn compare_and_swap(&mut self, old: u64, new: u64, order: Ordering) -> u64 {
unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) }
}
#[inline]
pub fn fetch_add(&mut self, val: u64, order: Ordering) -> u64 {
unsafe { atomic_add(&mut self.v, val, order) }
}
#[inline]
pub fn fetch_sub(&mut self, val: u64, order: Ordering) -> u64 {
unsafe { atomic_sub(&mut self.v, val, order) }
}
}
impl AtomicUint {
pub fn new(v: uint) -> AtomicUint {
AtomicUint { v:v, nocopy: NonCopyable }
@@ -254,26 +315,64 @@ impl AtomicUint {
}
impl<T> AtomicPtr<T> {
#[cfg(stage0)]
pub fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p:p, nocopy: NonCopyable }
AtomicPtr { p: p, nocopy: NonCopyable }
}
#[cfg(not(stage0))]
pub fn new(p: *mut T) -> AtomicPtr<T> {
AtomicPtr { p: p as uint, nocopy: NonCopyable }
}
#[inline]
#[cfg(not(stage0))]
pub fn load(&self, order: Ordering) -> *mut T {
unsafe {
atomic_load(&self.p, order) as *mut T
}
}
#[inline]
#[cfg(not(stage0))]
pub fn store(&mut self, ptr: *mut T, order: Ordering) {
unsafe { atomic_store(&mut self.p, ptr as uint, order); }
}
#[inline]
#[cfg(not(stage0))]
pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T {
unsafe { atomic_swap(&mut self.p, ptr as uint, order) as *mut T }
}
#[inline]
#[cfg(not(stage0))]
pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
unsafe {
atomic_compare_and_swap(&mut self.p, old as uint,
new as uint, order) as *mut T
}
}
#[inline]
#[cfg(stage0)]
pub fn load(&self, order: Ordering) -> *mut T {
unsafe { atomic_load(&self.p, order) }
}
#[inline]
#[cfg(stage0)]
pub fn store(&mut self, ptr: *mut T, order: Ordering) {
unsafe { atomic_store(&mut self.p, ptr, order); }
}
#[inline]
#[cfg(stage0)]
pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T {
unsafe { atomic_swap(&mut self.p, ptr, order) }
}
#[inline]
#[cfg(stage0)]
pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
unsafe { atomic_compare_and_swap(&mut self.p, old, new, order) }
}
@@ -281,20 +380,13 @@ impl<T> AtomicPtr<T> {
impl<T> AtomicOption<T> {
pub fn new(p: ~T) -> AtomicOption<T> {
unsafe {
AtomicOption {
p: cast::transmute(p)
}
}
unsafe { AtomicOption { p: cast::transmute(p) } }
}
pub fn empty() -> AtomicOption<T> {
unsafe {
AtomicOption {
p: cast::transmute(0)
}
}
}
#[cfg(stage0)]
pub fn empty() -> AtomicOption<T> { AtomicOption { p: 0 as *mut c_void } }
#[cfg(not(stage0))]
pub fn empty() -> AtomicOption<T> { AtomicOption { p: 0 } }
#[inline]
pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> {
@@ -302,9 +394,7 @@ impl<T> AtomicOption<T> {
let val = cast::transmute(val);
let p = atomic_swap(&mut self.p, val, order);
let pv : &uint = cast::transmute(&p);
if *pv == 0 {
if p as uint == 0 {
None
} else {
Some(cast::transmute(p))
@@ -314,9 +404,7 @@ impl<T> AtomicOption<T> {
#[inline]
pub fn take(&mut self, order: Ordering) -> Option<~T> {
unsafe {
self.swap(cast::transmute(0), order)
}
unsafe { self.swap(cast::transmute(0), order) }
}
/// A compare-and-swap. Succeeds if the option is 'None' and returns 'None'
@@ -340,7 +428,7 @@ impl<T> AtomicOption<T> {
/// result does not get invalidated by another task after this returns.
#[inline]
pub fn is_empty(&mut self, order: Ordering) -> bool {
unsafe { atomic_load(&self.p, order) == cast::transmute(0) }
unsafe { atomic_load(&self.p, order) as uint == 0 }
}
}
@@ -351,11 +439,20 @@ impl<T> Drop for AtomicOption<T> {
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_store<T>(dst: &mut T, val: T, order:Ordering) {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Release => intrinsics::atomic_store_rel(dst, val),
Relaxed => intrinsics::atomic_store_relaxed(dst, val),
_ => intrinsics::atomic_store(dst, val)
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_store<T>(dst: &mut T, val: T, order:Ordering) {
match order {
Release => intrinsics::atomic_store_rel(dst, val),
Relaxed => intrinsics::atomic_store_relaxed(dst, val),
@@ -363,22 +460,31 @@ pub unsafe fn atomic_store<T>(dst: &mut T, val: T, order:Ordering) {
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_load<T>(dst: &T, order:Ordering) -> T {
let dst = cast::transmute(dst);
cast::transmute(match order {
Acquire => intrinsics::atomic_load_acq(dst),
Relaxed => intrinsics::atomic_load_relaxed(dst),
_ => intrinsics::atomic_load(dst)
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_load<T>(dst: &T, order:Ordering) -> T {
match order {
Acquire => intrinsics::atomic_load_acq(dst),
Relaxed => intrinsics::atomic_load_relaxed(dst),
_ => intrinsics::atomic_load(dst)
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_swap<T>(dst: &mut T, val: T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Acquire => intrinsics::atomic_xchg_acq(dst, val),
Release => intrinsics::atomic_xchg_rel(dst, val),
@@ -387,13 +493,24 @@ pub unsafe fn atomic_swap<T>(dst: &mut T, val: T, order: Ordering) -> T {
_ => intrinsics::atomic_xchg(dst, val)
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_swap<T>(dst: &mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xchg_acq(dst, val),
Release => intrinsics::atomic_xchg_rel(dst, val),
AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
_ => intrinsics::atomic_xchg(dst, val)
}
}
/// Returns the old value (like __sync_fetch_and_add).
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_add<T>(dst: &mut T, val: T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Acquire => intrinsics::atomic_xadd_acq(dst, val),
Release => intrinsics::atomic_xadd_rel(dst, val),
@@ -402,13 +519,25 @@ pub unsafe fn atomic_add<T>(dst: &mut T, val: T, order: Ordering) -> T {
_ => intrinsics::atomic_xadd(dst, val)
})
}
/// Returns the old value (like __sync_fetch_and_add).
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_add<T>(dst: &mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xadd_acq(dst, val),
Release => intrinsics::atomic_xadd_rel(dst, val),
AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
_ => intrinsics::atomic_xadd(dst, val)
}
}
/// Returns the old value (like __sync_fetch_and_sub).
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_sub<T>(dst: &mut T, val: T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Acquire => intrinsics::atomic_xsub_acq(dst, val),
Release => intrinsics::atomic_xsub_rel(dst, val),
@@ -417,13 +546,25 @@ pub unsafe fn atomic_sub<T>(dst: &mut T, val: T, order: Ordering) -> T {
_ => intrinsics::atomic_xsub(dst, val)
})
}
/// Returns the old value (like __sync_fetch_and_sub).
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_sub<T>(dst: &mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xsub_acq(dst, val),
Release => intrinsics::atomic_xsub_rel(dst, val),
AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
_ => intrinsics::atomic_xsub(dst, val)
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_compare_and_swap<T>(dst:&mut T, old:T, new:T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let old = cast::transmute(old);
let new = cast::transmute(new);
let old = cast::transmute(old);
cast::transmute(match order {
Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
Release => intrinsics::atomic_cxchg_rel(dst, old, new),
@@ -432,12 +573,23 @@ pub unsafe fn atomic_compare_and_swap<T>(dst:&mut T, old:T, new:T, order: Orderi
_ => intrinsics::atomic_cxchg(dst, old, new),
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_compare_and_swap<T>(dst:&mut T, old:T, new:T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
Release => intrinsics::atomic_cxchg_rel(dst, old, new),
AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
_ => intrinsics::atomic_cxchg(dst, old, new),
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_and<T>(dst: &mut T, val: T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Acquire => intrinsics::atomic_and_acq(dst, val),
Release => intrinsics::atomic_and_rel(dst, val),
@@ -446,13 +598,23 @@ pub unsafe fn atomic_and<T>(dst: &mut T, val: T, order: Ordering) -> T {
_ => intrinsics::atomic_and(dst, val)
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_and<T>(dst: &mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_and_acq(dst, val),
Release => intrinsics::atomic_and_rel(dst, val),
AcqRel => intrinsics::atomic_and_acqrel(dst, val),
Relaxed => intrinsics::atomic_and_relaxed(dst, val),
_ => intrinsics::atomic_and(dst, val)
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_nand<T>(dst: &mut T, val: T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Acquire => intrinsics::atomic_nand_acq(dst, val),
Release => intrinsics::atomic_nand_rel(dst, val),
@@ -461,13 +623,24 @@ pub unsafe fn atomic_nand<T>(dst: &mut T, val: T, order: Ordering) -> T {
_ => intrinsics::atomic_nand(dst, val)
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_nand<T>(dst: &mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_nand_acq(dst, val),
Release => intrinsics::atomic_nand_rel(dst, val),
AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
_ => intrinsics::atomic_nand(dst, val)
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_or<T>(dst: &mut T, val: T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Acquire => intrinsics::atomic_or_acq(dst, val),
Release => intrinsics::atomic_or_rel(dst, val),
@@ -476,13 +649,24 @@ pub unsafe fn atomic_or<T>(dst: &mut T, val: T, order: Ordering) -> T {
_ => intrinsics::atomic_or(dst, val)
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_or<T>(dst: &mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_or_acq(dst, val),
Release => intrinsics::atomic_or_rel(dst, val),
AcqRel => intrinsics::atomic_or_acqrel(dst, val),
Relaxed => intrinsics::atomic_or_relaxed(dst, val),
_ => intrinsics::atomic_or(dst, val)
}
}
#[cfg(stage0)]
#[inline]
pub unsafe fn atomic_xor<T>(dst: &mut T, val: T, order: Ordering) -> T {
let dst = cast::transmute(dst);
let val = cast::transmute(val);
cast::transmute(match order {
Acquire => intrinsics::atomic_xor_acq(dst, val),
Release => intrinsics::atomic_xor_rel(dst, val),
@@ -491,6 +675,17 @@ pub unsafe fn atomic_xor<T>(dst: &mut T, val: T, order: Ordering) -> T {
_ => intrinsics::atomic_xor(dst, val)
})
}
#[cfg(not(stage0))]
#[inline]
pub unsafe fn atomic_xor<T>(dst: &mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_xor_acq(dst, val),
Release => intrinsics::atomic_xor_rel(dst, val),
AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
_ => intrinsics::atomic_xor(dst, val)
}
}
/**
@@ -599,4 +794,22 @@ mod test {
assert!(S_UINT.load(SeqCst) == 0);
}
}
#[test]
#[cfg(not(stage0))]
fn different_sizes() {
unsafe {
let mut slot = 0u16;
assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0);
let mut slot = 0u8;
assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0);
let mut slot = 0u32;
assert_eq!(super::atomic_load(&mut slot, SeqCst), 0);
let mut slot = 0u64;
super::atomic_store(&mut slot, 2, SeqCst);
}
}
}
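The free functions above are now generic over T, and the new different_sizes test exercises them at several widths. Here is a rough modern-Rust equivalent of that test using today's fixed-width atomic types; this is a sketch of the idea, not the code in this commit.

```rust
// Sketch: "one operation, many widths", expressed with today's fixed-width
// atomics rather than the generic atomic_load<T>/atomic_swap<T>/... helpers
// added in this commit.
use std::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering};

fn main() {
    let slot8 = AtomicU8::new(0);
    assert_eq!(
        slot8.compare_exchange(0, 2, Ordering::SeqCst, Ordering::SeqCst),
        Ok(0)
    );

    let slot16 = AtomicU16::new(0);
    assert_eq!(slot16.swap(1, Ordering::SeqCst), 0);

    let slot32 = AtomicU32::new(0);
    assert_eq!(slot32.load(Ordering::SeqCst), 0);

    let slot64 = AtomicU64::new(0);
    slot64.store(2, Ordering::SeqCst);
    assert_eq!(slot64.load(Ordering::SeqCst), 2);
}
```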


@@ -172,16 +172,8 @@ pub trait TyVisitor {
fn visit_closure_ptr(&mut self, ck: uint) -> bool;
}
#[cfg(stage0)]
extern "rust-intrinsic" {
/// Abort the execution of the process.
pub fn abort() -> !;
/// Execute a breakpoint trap, for inspection by a debugger.
pub fn breakpoint();
pub fn volatile_load<T>(src: *T) -> T;
pub fn volatile_store<T>(dst: *mut T, val: T);
/// Atomic compare and exchange, sequentially consistent.
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
/// Atomic compare and exchange, acquire ordering.
@@ -286,6 +278,106 @@ extern "rust-intrinsic" {
pub fn atomic_fence_acq();
pub fn atomic_fence_rel();
pub fn atomic_fence_acqrel();
}
#[cfg(not(stage0))]
extern "rust-intrinsic" {
pub fn atomic_cxchg<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_acq<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_rel<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_acqrel<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_relaxed<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_load<T>(src: &T) -> T;
pub fn atomic_load_acq<T>(src: &T) -> T;
pub fn atomic_load_relaxed<T>(src: &T) -> T;
pub fn atomic_store<T>(dst: &mut T, val: T);
pub fn atomic_store_rel<T>(dst: &mut T, val: T);
pub fn atomic_store_relaxed<T>(dst: &mut T, val: T);
pub fn atomic_xchg<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_and<T>(dst: &mut T, src: T) -> T;
pub fn atomic_and_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_and_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_and_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_and_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_nand<T>(dst: &mut T, src: T) -> T;
pub fn atomic_nand_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_nand_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_nand_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_nand_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_or<T>(dst: &mut T, src: T) -> T;
pub fn atomic_or_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_or_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_or_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_or_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xor<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xor_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xor_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xor_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xor_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_max<T>(dst: &mut T, src: T) -> T;
pub fn atomic_max_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_max_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_max_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_max_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_min<T>(dst: &mut T, src: T) -> T;
pub fn atomic_min_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_min_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_min_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_min_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umin<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umin_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umin_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umin_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umin_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umax<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umax_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umax_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umax_acqrel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_umax_relaxed<T>(dst: &mut T, src: T) -> T;
pub fn atomic_fence();
pub fn atomic_fence_acq();
pub fn atomic_fence_rel();
pub fn atomic_fence_acqrel();
}
extern "rust-intrinsic" {
/// Abort the execution of the process.
pub fn abort() -> !;
/// Execute a breakpoint trap, for inspection by a debugger.
pub fn breakpoint();
pub fn volatile_load<T>(src: *T) -> T;
pub fn volatile_store<T>(dst: *mut T, val: T);
/// The size of a type in bytes.
///
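One detail worth spelling out for the atomic_cxchg family declared above: like __sync_val_compare_and_swap, these intrinsics return the previous value, and the caller decides success by comparing that return against old. The sketch below recovers that convention on top of today's compare_exchange; the helper name is hypothetical and not part of std.

```rust
// Hypothetical helper, not part of std: recovers the "return the previous
// value" convention of atomic_cxchg on top of modern compare_exchange.
use std::sync::atomic::{AtomicU64, Ordering};

fn compare_and_swap_u64(a: &AtomicU64, old: u64, new: u64) -> u64 {
    match a.compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst) {
        Ok(prev) | Err(prev) => prev, // prev == old exactly when the store happened
    }
}

fn main() {
    let flag = AtomicU64::new(0);
    // Previous value equals `old`, so the exchange took place.
    assert_eq!(compare_and_swap_u64(&flag, 0, 1), 0);
    // Previous value (1) differs from `old` (0): nothing is stored.
    assert_eq!(compare_and_swap_u64(&flag, 0, 2), 1);
    assert_eq!(flag.load(Ordering::SeqCst), 1);
}
```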


@@ -10,21 +10,21 @@
pub mod rusti {
extern "rust-intrinsic" {
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_acq<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_rel<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_xchg(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xchg<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xadd<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xsub<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_rel<T>(dst: &mut T, src: T) -> T;
}
}


@@ -10,27 +10,27 @@
mod rusti {
extern "rust-intrinsic" {
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
pub fn atomic_cxchg<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_acq<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_cxchg_rel<T>(dst: &mut T, old: T, src: T) -> T;
pub fn atomic_load(src: &int) -> int;
pub fn atomic_load_acq(src: &int) -> int;
pub fn atomic_load<T>(src: &T) -> T;
pub fn atomic_load_acq<T>(src: &T) -> T;
pub fn atomic_store(dst: &mut int, val: int);
pub fn atomic_store_rel(dst: &mut int, val: int);
pub fn atomic_store<T>(dst: &mut T, val: T);
pub fn atomic_store_rel<T>(dst: &mut T, val: T);
pub fn atomic_xchg(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xchg<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xchg_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xadd<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xadd_rel<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
pub fn atomic_xsub<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_acq<T>(dst: &mut T, src: T) -> T;
pub fn atomic_xsub_rel<T>(dst: &mut T, src: T) -> T;
}
}