1
Fork 0

async-llvm(15): Don't require number of codegen units upfront.

This commit is contained in:
Michael Woerister 2017-07-26 14:18:11 +02:00 committed by Michael Woerister
parent a1be65845c
commit 943a5bdf35
2 changed files with 25 additions and 32 deletions

View file

@@ -33,7 +33,6 @@ use context::{is_pie_binary, get_reloc_model};
 use jobserver::{Client, Acquired};
 use rustc_demangle;
-use std::cmp;
 use std::ffi::CString;
 use std::fmt;
 use std::fs;
@@ -663,7 +662,6 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool {
 pub fn run_passes(sess: &Session,
                   crate_output: &OutputFilenames,
-                  total_work_item_count: usize,
                   crate_name: Symbol,
                   link: LinkMeta,
                   metadata: EncodedMetadata,
@@ -758,8 +756,7 @@ pub fn run_passes(sess: &Session,
         // Pick a "reasonable maximum" if we don't otherwise have a jobserver in
         // our environment, capping out at 32 so we don't take everything down
         // by hogging the process run queue.
-        let num_workers = cmp::min(total_work_item_count - 1, 32);
-        Client::new(num_workers).expect("failed to create jobserver")
+        Client::new(32).expect("failed to create jobserver")
     });

     let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
@@ -767,14 +764,12 @@ pub fn run_passes(sess: &Session,
     let (coordinator_send, coordinator_receive) = channel();

     let coordinator_thread = start_executing_work(sess,
-                                                  total_work_item_count,
                                                   shared_emitter,
                                                   trans_worker_send,
                                                   coordinator_send.clone(),
                                                   coordinator_receive,
                                                   client,
                                                   exported_symbols.clone());

     OngoingCrateTranslation {
         crate_name,
         link,
@@ -1072,6 +1067,7 @@ pub enum Message {
     Done { result: Result<CompiledModule, ()> },
     WorkItem(WorkItem),
     CheckErrorMessages,
+    TranslationDone,
 }
@@ -1082,7 +1078,6 @@ pub struct Diagnostic {
 }

 fn start_executing_work(sess: &Session,
-                        total_work_item_count: usize,
                         shared_emitter: SharedEmitter,
                         trans_worker_send: Sender<Message>,
                         coordinator_send: Sender<Message>,
@@ -1104,9 +1099,6 @@ fn start_executing_work(sess: &Session,
     let helper = jobserver.into_helper_thread(move |token| {
         drop(coordinator_send2.send(Message::Token(token)));
     }).expect("failed to spawn helper thread");
-    for _ in 0..total_work_item_count - 1 {
-        helper.request_token();
-    }

     let mut each_linked_rlib_for_lto = Vec::new();
     drop(link::each_linked_rlib(sess, &mut |cnum, path| {
@@ -1193,29 +1185,25 @@ fn start_executing_work(sess: &Session,
         let mut compiled_metadata_module = None;
         let mut compiled_allocator_module = None;

-        let mut work_items_left = total_work_item_count;
-        let mut work_items = Vec::with_capacity(total_work_item_count);
+        let mut translation_done = false;
+        let mut work_items = Vec::new();
         let mut tokens = Vec::new();
         let mut running = 0;

-        while work_items_left > 0 || running > 0 {
+        while !translation_done || work_items.len() > 0 || running > 0 {

             // Spin up what work we can, only doing this while we've got available
             // parallelism slots and work left to spawn.
-            while work_items_left > 0 && running < tokens.len() + 1 {
-                if let Some(item) = work_items.pop() {
-                    work_items_left -= 1;
-                    let worker_index = work_items_left;
+            while work_items.len() > 0 && running < tokens.len() + 1 {
+                let item = work_items.pop().unwrap();
+                let worker_index = work_items.len();

                 let cgcx = CodegenContext {
                     worker: worker_index,
                     .. cgcx.clone()
                 };

                 spawn_work(cgcx, item);
                 running += 1;
-                } else {
-                    break
-                }
             }

             // Relinquish accidentally acquired extra tokens
// Relinquish accidentally acquired extra tokens // Relinquish accidentally acquired extra tokens
@@ -1238,6 +1226,7 @@ fn start_executing_work(sess: &Session,
                 Message::WorkItem(work_item) => {
                     work_items.push(work_item);
+                    helper.request_token();
                 }

                 // If a thread exits successfully then we drop a token associated
@@ -1273,6 +1262,9 @@ fn start_executing_work(sess: &Session,
                     // Exit the coordinator thread
                     panic!()
                 }
+                Message::TranslationDone => {
+                    translation_done = true;
+                }
                 msg @ Message::CheckErrorMessages => {
                     bug!("unexpected message: {:?}", msg);
                 }
@@ -1619,4 +1611,8 @@ impl OngoingCrateTranslation {
         drop(self.coordinator_send.send(Message::WorkItem(work_item)));
     }
+
+    pub fn signal_translation_done(&self) {
+        drop(self.coordinator_send.send(Message::TranslationDone));
+    }
 }

View file

@@ -966,7 +966,6 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         let ongoing_translation = write::run_passes(
             tcx.sess,
             output_filenames,
-            1,
             tcx.crate_name(LOCAL_CRATE),
             link_meta,
             metadata,
@@ -977,6 +976,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
             false);

         ongoing_translation.submit_translated_module_to_llvm(tcx.sess, metadata_module);
+        ongoing_translation.signal_translation_done();

         return ongoing_translation;
     }
@@ -1237,14 +1237,9 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                          &metadata.hashes,
                          link_meta.crate_hash));
     // ---

-    let total_module_count = modules.len() + 1 +
-        if allocator_module.is_some() { 1 } else { 0 };
-
     let ongoing_translation = write::run_passes(
         sess,
         outputs,
-        total_module_count,
         tcx.crate_name(LOCAL_CRATE),
         link_meta,
         metadata,
@@ -1264,6 +1259,8 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         ongoing_translation.submit_translated_module_to_llvm(sess, allocator_module);
     }

+    ongoing_translation.signal_translation_done();
+
     ongoing_translation
 }