Make atomic instructions atomic using a global lock
parent a3f378754b
commit ddd3d95a7e
6 changed files with 147 additions and 2 deletions
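The commit lowers every atomic intrinsic to a plain memory access bracketed by pthread_mutex_lock/pthread_mutex_unlock calls on one process-wide mutex, since Cranelift has no atomic instructions yet. The stand-alone Rust sketch below is illustrative only and not part of the diff; it assumes a Unix target with the libc crate, and GLOBAL_ATOMIC_MUTEX and shimmed_atomic_xadd are hypothetical names standing in for the commit's __cg_clif_global_atomic_mutex data object and for the code the shim generates for an atomic_xadd intrinsic.

use std::ptr::addr_of_mut;

// Process-wide lock guarding every shimmed atomic operation
// (stands in for the commit's __cg_clif_global_atomic_mutex).
static mut GLOBAL_ATOMIC_MUTEX: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;

// Roughly what the shim-generated code does for atomic_xadd:
// lock, plain load, plain add and store, unlock, return the old value.
unsafe fn shimmed_atomic_xadd(ptr: *mut usize, amount: usize) -> usize {
    libc::pthread_mutex_lock(addr_of_mut!(GLOBAL_ATOMIC_MUTEX));
    let old = *ptr;                  // non-atomic load
    *ptr = old.wrapping_add(amount); // non-atomic store
    libc::pthread_mutex_unlock(addr_of_mut!(GLOBAL_ATOMIC_MUTEX));
    old
}

fn main() {
    let mut x: usize = 40;
    let old = unsafe { shimmed_atomic_xadd(&mut x, 2) };
    assert_eq!((old, x), (40, 42));
}

This only provides atomicity between code compiled by cg_clif itself, which is why the mutex must be a single exported symbol that every compiled crate, and the JIT, resolves to the same data object.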
@@ -13,6 +13,12 @@ fi

TARGET_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")

export RUSTFLAGS='-Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_cranelift.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot'

# FIXME remove once the atomic shim is gone
if [[ `uname` == 'Darwin' ]]; then
    export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
fi

RUSTC="rustc $RUSTFLAGS -L crate=target/out --out-dir target/out"
export RUSTC_LOG=warn # display metadata load errors
src/atomic_shim.rs (new file, 94 lines added)
@@ -0,0 +1,94 @@
//! Atomic intrinsics are implemented using a global lock for now, as Cranelift doesn't support
//! atomic operations yet.

// FIXME implement atomic instructions in Cranelift.

use crate::prelude::*;

#[no_mangle]
pub static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;

pub fn init_global_lock(module: &mut Module<impl Backend>, bcx: &mut FunctionBuilder<'_>) {
    if std::env::var("SHOULD_RUN").is_ok() {
        // When using JIT, dylibs won't find the __cg_clif_global_atomic_mutex data object defined here,
        // so instead define it in the cg_clif dylib.

        return;
    }

    let mut data_ctx = DataContext::new();
    data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms.
    let atomic_mutex = module.declare_data(
        "__cg_clif_global_atomic_mutex",
        Linkage::Export,
        true,
        Some(16),
    ).unwrap();
    module.define_data(atomic_mutex, &data_ctx).unwrap();

    let pthread_mutex_init = module.declare_function("pthread_mutex_init", Linkage::Import, &cranelift_codegen::ir::Signature {
        call_conv: module.target_config().default_call_conv,
        params: vec![
            AbiParam::new(module.target_config().pointer_type() /* *mut pthread_mutex_t */),
            AbiParam::new(module.target_config().pointer_type() /* *const pthread_mutex_attr_t */),
        ],
        returns: vec![AbiParam::new(types::I32 /* c_int */)],
    }).unwrap();

    let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func);

    let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func);
    let atomic_mutex = bcx.ins().global_value(module.target_config().pointer_type(), atomic_mutex);

    let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0);

    bcx.ins().call(pthread_mutex_init, &[atomic_mutex, nullptr]);
}

pub fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
    let atomic_mutex = fx.module.declare_data(
        "__cg_clif_global_atomic_mutex",
        Linkage::Import,
        true,
        None,
    ).unwrap();

    let pthread_mutex_lock = fx.module.declare_function("pthread_mutex_lock", Linkage::Import, &cranelift_codegen::ir::Signature {
        call_conv: fx.module.target_config().default_call_conv,
        params: vec![
            AbiParam::new(fx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
        ],
        returns: vec![AbiParam::new(types::I32 /* c_int */)],
    }).unwrap();

    let pthread_mutex_lock = fx.module.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);

    let atomic_mutex = fx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
    let atomic_mutex = fx.bcx.ins().global_value(fx.module.target_config().pointer_type(), atomic_mutex);

    fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
}

pub fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
    let atomic_mutex = fx.module.declare_data(
        "__cg_clif_global_atomic_mutex",
        Linkage::Import,
        true,
        None,
    ).unwrap();

    let pthread_mutex_unlock = fx.module.declare_function("pthread_mutex_unlock", Linkage::Import, &cranelift_codegen::ir::Signature {
        call_conv: fx.module.target_config().default_call_conv,
        params: vec![
            AbiParam::new(fx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
        ],
        returns: vec![AbiParam::new(types::I32 /* c_int */)],
    }).unwrap();

    let pthread_mutex_unlock = fx.module.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);

    let atomic_mutex = fx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
    let atomic_mutex = fx.bcx.ins().global_value(fx.module.target_config().pointer_type(), atomic_mutex);

    fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
}
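The shim reserves 1024 zeroed bytes with 16-byte alignment for the mutex data object instead of relying on the exact layout of pthread_mutex_t, whose size differs between platforms. As a hypothetical sanity check that is not part of the commit, assuming the libc crate on a Unix host and a toolchain that accepts assert! in const contexts, the reservation could be verified at compile time:

// Hypothetical check, not in the commit: 1024 bytes must cover the host's pthread_mutex_t.
const _: () = assert!(std::mem::size_of::<libc::pthread_mutex_t>() <= 1024);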
@@ -36,6 +36,13 @@ pub fn codegen_crate(
fn run_jit(tcx: TyCtxt<'_>) -> ! {
    use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};

    // Rustc opens us without the RTLD_GLOBAL flag, so __cg_clif_global_atomic_mutex will not be
    // exported. We fix this by opening ourselves again as global.
    // FIXME remove once atomic_shim is gone
    let cg_dylib = std::ffi::OsString::from(&tcx.sess.opts.debugging_opts.codegen_backend.as_ref().unwrap());
    std::mem::forget(libloading::os::unix::Library::open(Some(cg_dylib), libc::RTLD_NOW | libc::RTLD_GLOBAL).unwrap());

    let imported_symbols = load_imported_symbols_for_jit(tcx);

    let mut jit_builder = SimpleJITBuilder::with_isa(
@@ -104,14 +104,20 @@ macro call_intrinsic_match {
}

macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
    crate::atomic_shim::lock_global_lock($fx);

    let clif_ty = $fx.clif_type($T).unwrap();
    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
    let new = $fx.bcx.ins().$op(old, $src);
    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
    $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));

    crate::atomic_shim::unlock_global_lock($fx);
}

macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
    crate::atomic_shim::lock_global_lock($fx);

    // Read old
    let clif_ty = $fx.clif_type($T).unwrap();
    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
@@ -125,6 +131,8 @@ macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $

    let ret_val = CValue::by_val(old, $ret.layout());
    $ret.write_cvalue($fx, ret_val);

    crate::atomic_shim::unlock_global_lock($fx);
}

fn lane_type_and_count<'tcx>(
@@ -845,19 +853,35 @@ pub fn codegen_intrinsic_call<'tcx>(
    ret.write_cvalue(fx, caller_location);
};

_ if intrinsic.starts_with("atomic_fence"), () {};
_ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
_ if intrinsic.starts_with("atomic_fence"), () {
    crate::atomic_shim::lock_global_lock(fx);
    crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_singlethreadfence"), () {
    crate::atomic_shim::lock_global_lock(fx);
    crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_load"), (c ptr) {
    crate::atomic_shim::lock_global_lock(fx);

    let inner_layout =
        fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
    let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
    ret.write_cvalue(fx, val);

    crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
    crate::atomic_shim::lock_global_lock(fx);

    let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
    dest.write_cvalue(fx, val);

    crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
    crate::atomic_shim::lock_global_lock(fx);

    // Read old
    let clif_ty = fx.clif_type(T).unwrap();
    let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
@@ -866,8 +890,12 @@ pub fn codegen_intrinsic_call<'tcx>(
    // Write new
    let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
    dest.write_cvalue(fx, src);

    crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
    crate::atomic_shim::lock_global_lock(fx);

    // Read old
    let clif_ty = fx.clif_type(T).unwrap();
    let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
@@ -881,6 +909,8 @@ pub fn codegen_intrinsic_call<'tcx>(

    let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
    ret.write_cvalue(fx, ret_val);

    crate::atomic_shim::unlock_global_lock(fx);
};

_ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
@@ -893,12 +923,16 @@ pub fn codegen_intrinsic_call<'tcx>(
    atomic_binop_return_old!(fx, band<T>(ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
    crate::atomic_shim::lock_global_lock(fx);

    let clif_ty = fx.clif_type(T).unwrap();
    let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
    let and = fx.bcx.ins().band(old, src);
    let new = fx.bcx.ins().bnot(and);
    fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
    ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));

    crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
    atomic_binop_return_old!(fx, bor<T>(ptr, src) -> ret);
@@ -2,6 +2,7 @@
#![allow(intra_doc_link_resolution_failure)]

extern crate flate2;
extern crate libc;
extern crate tempfile;
extern crate rustc;
extern crate rustc_codegen_ssa;
@@ -36,6 +37,7 @@ mod abi;
mod allocator;
mod analyze;
mod archive;
mod atomic_shim;
mod base;
mod backend;
mod cast;
@@ -67,6 +67,8 @@ pub fn maybe_create_entry_wrapper(tcx: TyCtxt<'_>, module: &mut Module<impl Back
    let arg_argc = bcx.append_ebb_param(ebb, m.target_config().pointer_type());
    let arg_argv = bcx.append_ebb_param(ebb, m.target_config().pointer_type());

    crate::atomic_shim::init_global_lock(m, &mut bcx);

    let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);

    let call_inst = if use_start_lang_item {