
Replace LockCell with atomic types

John Kåre Alsaker 2018-12-05 16:51:58 +01:00
parent 35a64f8bbf
commit 03b7cec2de
5 changed files with 100 additions and 175 deletions
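The change is almost entirely mechanical: every `LockCell<T>` becomes the matching atomic type (`AtomicU64`, `AtomicBool`, ...), and `get()`/`set()` calls become `load`/`store` with an explicit `SeqCst` memory ordering. A minimal before/after sketch of the pattern, outside rustc (the `fuel` counter is a made-up example, not the compiler's field):

    // Before (hypothetical, mirroring the removed LockCell API):
    //     let fuel = LockCell::new(100u64);
    //     let n = fuel.get();
    //     fuel.set(n - 1);

    // After: a std atomic with an explicit ordering on every access.
    use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

    fn main() {
        let fuel = AtomicU64::new(100);
        let n = fuel.load(SeqCst);
        if n > 0 {
            // Separate load + store is not an atomic read-modify-write;
            // it is only sound when a single thread runs this, which is
            // what the assert_eq!(query_threads(), 1) in the diff enforces.
            fuel.store(n - 1, SeqCst);
        }
        assert_eq!(fuel.load(SeqCst), 99);
    }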

View file

@@ -16,7 +16,10 @@ use util::common::{duration_to_secs_str, ErrorReported};
 use util::common::ProfileQueriesMsg;
 use rustc_data_structures::base_n;
-use rustc_data_structures::sync::{self, Lrc, Lock, LockCell, OneThread, Once, RwLock};
+use rustc_data_structures::sync::{
+    self, Lrc, Lock, OneThread, Once, RwLock, AtomicU64, AtomicUsize, AtomicBool, Ordering,
+    Ordering::SeqCst,
+};
 use errors::{self, DiagnosticBuilder, DiagnosticId, Applicability};
 use errors::emitter::{Emitter, EmitterWriter};
@@ -41,7 +44,6 @@ use std::io::Write;
 use std::path::PathBuf;
 use std::time::Duration;
 use std::sync::mpsc;
-use std::sync::atomic::{AtomicUsize, Ordering};

 mod code_stats;
 pub mod config;
@@ -138,15 +140,15 @@ pub struct Session {
     /// If -zfuel=crate=n is specified, Some(crate).
     optimization_fuel_crate: Option<String>,
     /// If -zfuel=crate=n is specified, initially set to n. Otherwise 0.
-    optimization_fuel_limit: LockCell<u64>,
+    optimization_fuel_limit: AtomicU64,
     /// We're rejecting all further optimizations.
-    out_of_fuel: LockCell<bool>,
+    out_of_fuel: AtomicBool,

     // The next two are public because the driver needs to read them.
     /// If -zprint-fuel=crate, Some(crate).
     pub print_fuel_crate: Option<String>,
     /// Always set to zero and incremented so that we can print fuel expended by a crate.
-    pub print_fuel: LockCell<u64>,
+    pub print_fuel: AtomicU64,

     /// Loaded up early on in the initialization of this `Session` to avoid
     /// false positives about a job server in our environment.
@@ -857,32 +859,43 @@ impl Session {
                  self.perf_stats.normalize_projection_ty.load(Ordering::Relaxed));
     }

-    /// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n.
-    /// This expends fuel if applicable, and records fuel if applicable.
-    pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
+    #[inline(never)]
+    #[cold]
+    pub fn consider_optimizing_cold<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
         let mut ret = true;
         if let Some(ref c) = self.optimization_fuel_crate {
             if c == crate_name {
                 assert_eq!(self.query_threads(), 1);
-                let fuel = self.optimization_fuel_limit.get();
+                let fuel = self.optimization_fuel_limit.load(SeqCst);
                 ret = fuel != 0;
-                if fuel == 0 && !self.out_of_fuel.get() {
+                if fuel == 0 && !self.out_of_fuel.load(SeqCst) {
                     eprintln!("optimization-fuel-exhausted: {}", msg());
-                    self.out_of_fuel.set(true);
+                    self.out_of_fuel.store(true, SeqCst);
                 } else if fuel > 0 {
-                    self.optimization_fuel_limit.set(fuel - 1);
+                    self.optimization_fuel_limit.store(fuel - 1, SeqCst);
                 }
             }
         }
         if let Some(ref c) = self.print_fuel_crate {
             if c == crate_name {
                 assert_eq!(self.query_threads(), 1);
-                self.print_fuel.set(self.print_fuel.get() + 1);
+                self.print_fuel.store(self.print_fuel.load(SeqCst) + 1, SeqCst);
             }
         }
         ret
     }
+
+    /// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n.
+    /// This expends fuel if applicable, and records fuel if applicable.
+    #[inline(always)]
+    pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
+        if likely!(self.optimization_fuel_crate.is_none() && self.print_fuel_crate.is_none()) {
+            true
+        } else {
+            self.consider_optimizing_cold(crate_name, msg)
+        }
+    }

     /// Returns the number of query threads that should be used for this
     /// compilation
     pub fn query_threads_from_opts(opts: &config::Options) -> usize {
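The split above is a classic fast-path/cold-path arrangement: `consider_optimizing` is `#[inline(always)]` and, when neither `-Zfuel` nor `-Zprint-fuel` is set, reduces to two `Option` checks behind a branch-prediction hint, while all the bookkeeping lives in the `#[inline(never)] #[cold]` function. A standalone sketch of the same pattern (the `Session` struct here is a stub; `likely!` in the diff is rustc's internal macro, approximated below with `std::intrinsics::likely` behind `feature(core_intrinsics)`):

    #![feature(core_intrinsics)] // nightly-only, for the `likely` hint

    struct Session {
        optimization_fuel_crate: Option<String>,
        print_fuel_crate: Option<String>,
    }

    impl Session {
        // Cold path: only reached when fuel tracking is actually enabled.
        #[inline(never)]
        #[cold]
        fn consider_optimizing_cold(&self, crate_name: &str) -> bool {
            self.optimization_fuel_crate
                .as_ref()
                .map_or(true, |c| c != crate_name)
        }

        // Fast path: inlined at every call site; the hint tells the
        // optimizer to lay out code for the "no fuel flags" case.
        #[inline(always)]
        fn consider_optimizing(&self, crate_name: &str) -> bool {
            if unsafe {
                std::intrinsics::likely(
                    self.optimization_fuel_crate.is_none() && self.print_fuel_crate.is_none(),
                )
            } {
                true
            } else {
                self.consider_optimizing_cold(crate_name)
            }
        }
    }

    fn main() {
        let sess = Session { optimization_fuel_crate: None, print_fuel_crate: None };
        assert!(sess.consider_optimizing("foo"));
    }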
@@ -1128,9 +1141,9 @@ pub fn build_session_(
     let optimization_fuel_crate = sopts.debugging_opts.fuel.as_ref().map(|i| i.0.clone());
     let optimization_fuel_limit =
-        LockCell::new(sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0));
+        AtomicU64::new(sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0));
     let print_fuel_crate = sopts.debugging_opts.print_fuel.clone();
-    let print_fuel = LockCell::new(0);
+    let print_fuel = AtomicU64::new(0);

     let working_dir = env::current_dir().unwrap_or_else(|e|
         p_s.span_diagnostic
@@ -1195,7 +1208,7 @@ pub fn build_session_(
         optimization_fuel_limit,
         print_fuel_crate,
         print_fuel,
-        out_of_fuel: LockCell::new(false),
+        out_of_fuel: AtomicBool::new(false),
         // Note that this is unsafe because it may misinterpret file descriptors
         // on Unix as jobserver file descriptors. We hopefully execute this near
         // the beginning of the process though to ensure we don't get false

View file

@@ -21,6 +21,7 @@
 #![feature(hash_raw_entry)]
 #![feature(stmt_expr_attributes)]
 #![feature(core_intrinsics)]
+#![feature(integer_atomics)]
 #![cfg_attr(unix, feature(libc))]
 #![cfg_attr(test, feature(test))]

View file

@@ -10,10 +10,6 @@
 //! It internally uses `parking_lot::RwLock` if cfg!(parallel_queries) is true,
 //! `RefCell` otherwise.
 //!
-//! `LockCell` is a thread-safe version of `Cell`, with `set` and `get` operations.
-//! It can never deadlock. It uses `Cell` when
-//! cfg!(parallel_queries) is false, otherwise it is a `Lock`.
-//!
 //! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false.
 //!
 //! `MTRef` is an immutable reference if cfg!(parallel_queries), and a mutable reference otherwise.
@@ -23,11 +19,7 @@
 use std::collections::HashMap;
 use std::hash::{Hash, BuildHasher};
 use std::cmp::Ordering;
-use std::marker::PhantomData;
-use std::fmt::Debug;
-use std::fmt::Formatter;
-use std::fmt;
 use std::ops::{Deref, DerefMut};

 use owning_ref::{Erased, OwningRef};
@@ -54,6 +46,9 @@ pub fn serial_scope<F, R>(f: F) -> R
     f(&SerialScope)
 }

+pub use std::sync::atomic::Ordering::SeqCst;
+pub use std::sync::atomic::Ordering;
+
 cfg_if! {
     if #[cfg(not(parallel_queries))] {
         pub auto trait Send {}
@@ -69,6 +64,62 @@
             }
         }

+        use std::ops::Add;
+
+        #[derive(Debug)]
+        pub struct Atomic<T: Copy>(Cell<T>);
+
+        impl<T: Copy> Atomic<T> {
+            pub fn new(v: T) -> Self {
+                Atomic(Cell::new(v))
+            }
+        }
+
+        impl<T: Copy + PartialEq> Atomic<T> {
+            pub fn into_inner(self) -> T {
+                self.0.into_inner()
+            }
+
+            pub fn load(&self, _: Ordering) -> T {
+                self.0.get()
+            }
+
+            pub fn store(&self, val: T, _: Ordering) {
+                self.0.set(val)
+            }
+
+            pub fn swap(&self, val: T, _: Ordering) -> T {
+                self.0.replace(val)
+            }
+
+            pub fn compare_exchange(&self,
+                                    current: T,
+                                    new: T,
+                                    _: Ordering,
+                                    _: Ordering)
+                                    -> Result<T, T> {
+                let read = self.0.get();
+                if read == current {
+                    self.0.set(new);
+                    Ok(read)
+                } else {
+                    Err(read)
+                }
+            }
+        }
+
+        impl<T: Add<Output=T> + Copy> Atomic<T> {
+            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
+                let old = self.0.get();
+                self.0.set(old + val);
+                old
+            }
+        }
+
+        pub type AtomicUsize = Atomic<usize>;
+        pub type AtomicBool = Atomic<bool>;
+        pub type AtomicU64 = Atomic<u64>;
+
         pub use self::serial_join as join;
         pub use self::serial_scope as scope;
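Under `cfg!(not(parallel_queries))` this `Atomic<T>` only has to look like an atomic: it wraps a plain `Cell<T>` and ignores the `Ordering` arguments, which is sound because the serial compiler never shares it across threads. It still preserves std's observable semantics, e.g. for `compare_exchange`. A small standalone demo (re-declaring a condensed copy of the shim, since the real one is private to sync.rs):

    use std::cell::Cell;
    use std::sync::atomic::Ordering;

    // Condensed copy of the single-threaded shim, for demonstration only.
    struct Atomic<T: Copy>(Cell<T>);

    impl<T: Copy + PartialEq> Atomic<T> {
        fn new(v: T) -> Self { Atomic(Cell::new(v)) }
        fn load(&self, _: Ordering) -> T { self.0.get() }
        // Writes `new` only if the current value equals `current`,
        // matching std::sync::atomic's compare_exchange contract.
        fn compare_exchange(&self, current: T, new: T, _: Ordering, _: Ordering)
                            -> Result<T, T> {
            let read = self.0.get();
            if read == current { self.0.set(new); Ok(read) } else { Err(read) }
        }
    }

    fn main() {
        let a = Atomic::new(1u64);
        assert_eq!(a.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst), Ok(1));
        assert_eq!(a.compare_exchange(1, 3, Ordering::SeqCst, Ordering::SeqCst), Err(2));
        assert_eq!(a.load(Ordering::SeqCst), 2);
    }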
@@ -160,47 +211,6 @@
                 MTLock(self.0.clone())
             }
         }
-
-        pub struct LockCell<T>(Cell<T>);
-
-        impl<T> LockCell<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                LockCell(Cell::new(inner))
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline(always)]
-            pub fn set(&self, new_inner: T) {
-                self.0.set(new_inner);
-            }
-
-            #[inline(always)]
-            pub fn get(&self) -> T where T: Copy {
-                self.0.get()
-            }
-
-            #[inline(always)]
-            pub fn set_mut(&mut self, new_inner: T) {
-                self.0.set(new_inner);
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> T where T: Copy {
-                self.0.get()
-            }
-        }
-
-        impl<T> LockCell<Option<T>> {
-            #[inline(always)]
-            pub fn take(&self) -> Option<T> {
-                unsafe { (*self.0.as_ptr()).take() }
-            }
-        }
     } else {
         pub use std::marker::Send as Send;
         pub use std::marker::Sync as Sync;
@@ -213,6 +223,8 @@
         pub use parking_lot::MutexGuard as LockGuard;
         pub use parking_lot::MappedMutexGuard as MappedLockGuard;

+        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU64};
+
         pub use std::sync::Arc as Lrc;
         pub use std::sync::Weak as Weak;
@@ -278,47 +290,6 @@
                 v.erase_send_sync_owner()
             }}
         }
-
-        pub struct LockCell<T>(Lock<T>);
-
-        impl<T> LockCell<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                LockCell(Lock::new(inner))
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline(always)]
-            pub fn set(&self, new_inner: T) {
-                *self.0.lock() = new_inner;
-            }
-
-            #[inline(always)]
-            pub fn get(&self) -> T where T: Copy {
-                *self.0.lock()
-            }
-
-            #[inline(always)]
-            pub fn set_mut(&mut self, new_inner: T) {
-                *self.0.get_mut() = new_inner;
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> T where T: Copy {
-                *self.0.get_mut()
-            }
-        }
-
-        impl<T> LockCell<Option<T>> {
-            #[inline(always)]
-            pub fn take(&self) -> Option<T> {
-                self.0.lock().take()
-            }
-        }
     }
 }
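Both deleted `LockCell` bodies follow sync.rs's general scheme: one name per primitive, with `cfg_if!` selecting either a zero-overhead single-threaded implementation or a genuinely synchronized one. A minimal standalone sketch of that arrangement (using the `cfg-if` crate and a made-up `parallel` Cargo feature in place of rustc's `parallel_queries` cfg):

    use cfg_if::cfg_if; // cfg-if = "1" in Cargo.toml

    cfg_if! {
        if #[cfg(not(feature = "parallel"))] {
            // Serial build: cheap, non-thread-safe stand-ins.
            pub use std::rc::Rc as Lrc;
            pub use std::cell::RefCell as Lock;
        } else {
            // Parallel build: genuinely thread-safe types.
            pub use std::sync::Arc as Lrc;
            pub use std::sync::Mutex as Lock;
        }
    }

    fn main() {
        // Call sites name only `Lrc`/`Lock`; the build config picks the type.
        let shared = Lrc::new(Lock::new(0u32));
        println!("{:?}", shared);
    }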
@@ -466,65 +437,6 @@ impl<T> Once<T> {
     }
 }

-impl<T: Copy + Debug> Debug for LockCell<T> {
-    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
-        f.debug_struct("LockCell")
-            .field("value", &self.get())
-            .finish()
-    }
-}
-
-impl<T:Default> Default for LockCell<T> {
-    /// Creates a `LockCell<T>`, with the `Default` value for T.
-    #[inline]
-    fn default() -> LockCell<T> {
-        LockCell::new(Default::default())
-    }
-}
-
-impl<T:PartialEq + Copy> PartialEq for LockCell<T> {
-    #[inline]
-    fn eq(&self, other: &LockCell<T>) -> bool {
-        self.get() == other.get()
-    }
-}
-
-impl<T:Eq + Copy> Eq for LockCell<T> {}
-
-impl<T:PartialOrd + Copy> PartialOrd for LockCell<T> {
-    #[inline]
-    fn partial_cmp(&self, other: &LockCell<T>) -> Option<Ordering> {
-        self.get().partial_cmp(&other.get())
-    }
-
-    #[inline]
-    fn lt(&self, other: &LockCell<T>) -> bool {
-        self.get() < other.get()
-    }
-
-    #[inline]
-    fn le(&self, other: &LockCell<T>) -> bool {
-        self.get() <= other.get()
-    }
-
-    #[inline]
-    fn gt(&self, other: &LockCell<T>) -> bool {
-        self.get() > other.get()
-    }
-
-    #[inline]
-    fn ge(&self, other: &LockCell<T>) -> bool {
-        self.get() >= other.get()
-    }
-}
-
-impl<T:Ord + Copy> Ord for LockCell<T> {
-    #[inline]
-    fn cmp(&self, other: &LockCell<T>) -> Ordering {
-        self.get().cmp(&other.get())
-    }
-}

 #[derive(Debug)]
 pub struct Lock<T>(InnerLock<T>);

View file

@@ -16,6 +16,7 @@
 #![feature(slice_sort_by_cached_key)]
 #![feature(set_stdio)]
 #![feature(no_debug)]
+#![feature(integer_atomics)]

 #![recursion_limit="256"]
@@ -59,7 +60,7 @@ use pretty::{PpMode, UserIdentifiedItem};
 use rustc_resolve as resolve;
 use rustc_save_analysis as save;
 use rustc_save_analysis::DumpHandler;
-use rustc_data_structures::sync::{self, Lrc};
+use rustc_data_structures::sync::{self, Lrc, Ordering::SeqCst};
 use rustc_data_structures::OnDrop;
 use rustc::session::{self, config, Session, build_session, CompileResult};
 use rustc::session::CompileIncomplete;
@@ -934,7 +935,7 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls {
             let sess = state.session;
             eprintln!("Fuel used by {}: {}",
                       sess.print_fuel_crate.as_ref().unwrap(),
-                      sess.print_fuel.get());
+                      sess.print_fuel.load(SeqCst));
         }
     }
     control

View file

@@ -26,15 +26,13 @@ use self::Level::*;
 use emitter::{Emitter, EmitterWriter};

-use rustc_data_structures::sync::{self, Lrc, Lock, LockCell};
+use rustc_data_structures::sync::{self, Lrc, Lock, AtomicUsize, AtomicBool, SeqCst};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_data_structures::stable_hasher::StableHasher;

 use std::borrow::Cow;
 use std::cell::Cell;
 use std::{error, fmt};
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering::SeqCst;
 use std::panic;

 use termcolor::{ColorSpec, Color};
@@ -271,7 +269,7 @@ pub struct Handler {
     err_count: AtomicUsize,
     emitter: Lock<Box<dyn Emitter + sync::Send>>,
-    continue_after_error: LockCell<bool>,
+    continue_after_error: AtomicBool,
     delayed_span_bugs: Lock<Vec<Diagnostic>>,

     // This set contains the `DiagnosticId` of all emitted diagnostics to avoid
@@ -370,7 +368,7 @@ impl Handler {
             flags,
             err_count: AtomicUsize::new(0),
             emitter: Lock::new(e),
-            continue_after_error: LockCell::new(true),
+            continue_after_error: AtomicBool::new(true),
             delayed_span_bugs: Lock::new(Vec::new()),
             taught_diagnostics: Default::default(),
             emitted_diagnostic_codes: Default::default(),
@@ -379,7 +377,7 @@
     }

     pub fn set_continue_after_error(&self, continue_after_error: bool) {
-        self.continue_after_error.set(continue_after_error);
+        self.continue_after_error.store(continue_after_error, SeqCst);
     }

     /// Resets the diagnostic error count as well as the cached emitted diagnostics.
@@ -658,7 +656,7 @@ impl Handler {
         let mut db = DiagnosticBuilder::new(self, lvl, msg);
         db.set_span(msp.clone());
         db.emit();
-        if !self.continue_after_error.get() {
+        if !self.continue_after_error.load(SeqCst) {
             self.abort_if_errors();
         }
     }
@@ -669,7 +667,7 @@
         let mut db = DiagnosticBuilder::new_with_code(self, lvl, Some(code), msg);
         db.set_span(msp.clone());
         db.emit();
-        if !self.continue_after_error.get() {
+        if !self.continue_after_error.load(SeqCst) {
             self.abort_if_errors();
         }
     }