Separate out methods for running thin and fat LTO

parent a17de6980a
commit bdbee6311b
4 changed files with 74 additions and 47 deletions

@@ -48,18 +48,11 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool {
     }
 }
 
-/// Performs LTO, which in the case of full LTO means merging all modules into
-/// a single one and returning it for further optimizing. For ThinLTO, it will
-/// do the global analysis necessary and return two lists, one of the modules
-/// the need optimization and another for modules that can simply be copied over
-/// from the incr. comp. cache.
-pub(crate) fn run(cgcx: &CodegenContext<LlvmCodegenBackend>,
-                  modules: Vec<ModuleCodegen<ModuleLlvm>>,
-                  cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
-                  timeline: &mut Timeline)
-    -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError>
+fn prepare_lto(cgcx: &CodegenContext<LlvmCodegenBackend>,
+               timeline: &mut Timeline,
+               diag_handler: &Handler)
+    -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError>
 {
-    let diag_handler = cgcx.create_diag_handler();
     let export_threshold = match cgcx.lto {
         // We're just doing LTO for our one crate
         Lto::ThinLocal => SymbolExportLevel::Rust,

@@ -144,36 +137,56 @@ pub(crate) fn run(cgcx: &CodegenContext<LlvmCodegenBackend>,
         }
     }
 
+    Ok((symbol_white_list, upstream_modules))
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(cgcx: &CodegenContext<LlvmCodegenBackend>,
+                      modules: Vec<ModuleCodegen<ModuleLlvm>>,
+                      _cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+                      timeline: &mut Timeline)
+    -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError>
+{
+    let diag_handler = cgcx.create_diag_handler();
+    let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, timeline, &diag_handler)?;
     let symbol_white_list = symbol_white_list.iter()
         .map(|c| c.as_ptr())
         .collect::<Vec<_>>();
-    match cgcx.lto {
-        Lto::Fat => {
-            assert!(cached_modules.is_empty());
-            let opt_jobs = fat_lto(cgcx,
-                                   &diag_handler,
-                                   modules,
-                                   upstream_modules,
-                                   &symbol_white_list,
-                                   timeline);
-            opt_jobs.map(|opt_jobs| (opt_jobs, vec![]))
-        }
-        Lto::Thin |
-        Lto::ThinLocal => {
-            if cgcx.opts.debugging_opts.cross_lang_lto.enabled() {
-                unreachable!("We should never reach this case if the LTO step \
-                              is deferred to the linker");
-            }
-            thin_lto(cgcx,
-                     &diag_handler,
-                     modules,
-                     upstream_modules,
-                     cached_modules,
-                     &symbol_white_list,
-                     timeline)
-        }
-        Lto::No => unreachable!(),
+    let opt_jobs = fat_lto(cgcx,
+                           &diag_handler,
+                           modules,
+                           upstream_modules,
+                           &symbol_white_list,
+                           timeline);
+    opt_jobs.map(|opt_jobs| (opt_jobs, vec![]))
+}
+
+/// Performs thin LTO by performing necessary global analysis and returning two
+/// lists, one of the modules that need optimization and another for modules that
+/// can simply be copied over from the incr. comp. cache.
+pub(crate) fn run_thin(cgcx: &CodegenContext<LlvmCodegenBackend>,
+                       modules: Vec<ModuleCodegen<ModuleLlvm>>,
+                       cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+                       timeline: &mut Timeline)
+    -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError>
+{
+    let diag_handler = cgcx.create_diag_handler();
+    let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, timeline, &diag_handler)?;
+    let symbol_white_list = symbol_white_list.iter()
+        .map(|c| c.as_ptr())
+        .collect::<Vec<_>>();
+    if cgcx.opts.debugging_opts.cross_lang_lto.enabled() {
+        unreachable!("We should never reach this case if the LTO step \
+                      is deferred to the linker");
     }
+    thin_lto(cgcx,
+             &diag_handler,
+             modules,
+             upstream_modules,
+             cached_modules,
+             &symbol_white_list,
+             timeline)
 }
 
 fn fat_lto(cgcx: &CodegenContext<LlvmCodegenBackend>,

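
With this split, the match on cgcx.lto that used to live inside run moves out to the callers; the hunk above only removes it. For orientation, the old single entry point could still be expressed as a thin wrapper over the two new functions, roughly as sketched below (illustrative only, not code added by this commit):

// Illustrative wrapper: the commit does not keep a combined entry point; the
// caller (generate_lto_work, further down) now picks run_fat or run_thin itself.
pub(crate) fn run(cgcx: &CodegenContext<LlvmCodegenBackend>,
                  modules: Vec<ModuleCodegen<ModuleLlvm>>,
                  cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
                  timeline: &mut Timeline)
    -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError>
{
    match cgcx.lto {
        // Full LTO: merge every module into one and keep optimizing it.
        Lto::Fat => run_fat(cgcx, modules, cached_modules, timeline),
        // ThinLTO: global analysis, then per-module optimization jobs plus
        // modules that can simply be copied from the incr. comp. cache.
        Lto::Thin | Lto::ThinLocal => run_thin(cgcx, modules, cached_modules, timeline),
        // The LTO entry points are only reached when some form of LTO is enabled.
        Lto::No => unreachable!(),
    }
}
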
@@ -176,13 +176,21 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     fn print_pass_timings(&self) {
         unsafe { llvm::LLVMRustPrintPassTimings(); }
     }
-    fn run_lto(
+    fn run_fat_lto(
         cgcx: &CodegenContext<Self>,
         modules: Vec<ModuleCodegen<Self::Module>>,
         cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
         timeline: &mut Timeline
     ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
-        back::lto::run(cgcx, modules, cached_modules, timeline)
+        back::lto::run_fat(cgcx, modules, cached_modules, timeline)
+    }
+    fn run_thin_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+        timeline: &mut Timeline
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+        back::lto::run_thin(cgcx, modules, cached_modules, timeline)
     }
     unsafe fn optimize(
         cgcx: &CodegenContext<Self>,

@@ -264,11 +264,11 @@ fn generate_lto_work<B: ExtraBackendMethods>(
 
     let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
         assert!(needs_thin_lto.is_empty());
-        B::run_lto(cgcx, needs_fat_lto, import_only_modules, &mut timeline)
+        B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules, &mut timeline)
             .unwrap_or_else(|e| e.raise())
     } else {
         assert!(needs_fat_lto.is_empty());
-        B::run_lto(cgcx, needs_thin_lto, import_only_modules, &mut timeline)
+        B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules, &mut timeline)
             .unwrap_or_else(|e| e.raise())
     };
 

@@ -24,12 +24,18 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
     type ThinData: Send + Sync;
     type ThinBuffer: ThinBufferMethods;
 
-    /// Performs LTO, which in the case of full LTO means merging all modules into
-    /// a single one and returning it for further optimizing. For ThinLTO, it will
-    /// do the global analysis necessary and return two lists, one of the modules
-    /// the need optimization and another for modules that can simply be copied over
-    /// from the incr. comp. cache.
-    fn run_lto(
+    /// Performs fat LTO by merging all modules into a single one and returning it
+    /// for further optimization.
+    fn run_fat_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+        timeline: &mut Timeline,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
+    /// Performs thin LTO by performing necessary global analysis and returning two
+    /// lists, one of the modules that need optimization and another for modules that
+    /// can simply be copied over from the incr. comp. cache.
+    fn run_thin_lto(
         cgcx: &CodegenContext<Self>,
         modules: Vec<ModuleCodegen<Self::Module>>,
         cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,

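
Seen from the generic side, the split trait surfaces as two calls with the same return shape. The sketch below condenses the generate_lto_work change above into a standalone helper; it assumes run_thin_lto keeps the same return type as run_fat_lto (the hunk is cut off before showing it), and the helper name and trait bound are illustrative rather than part of the commit:

// Sketch: drive whichever LTO flavor is needed through the new trait methods.
fn run_lto_work<B: WriteBackendMethods>(
    cgcx: &CodegenContext<B>,
    needs_fat_lto: Vec<ModuleCodegen<B::Module>>,
    needs_thin_lto: Vec<ModuleCodegen<B::Module>>,
    import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
    timeline: &mut Timeline,
) -> Result<(Vec<LtoModuleCodegen<B>>, Vec<WorkProduct>), FatalError> {
    if !needs_fat_lto.is_empty() {
        // Fat LTO merges everything into one module that still needs optimizing;
        // nothing can be copied over, so the WorkProduct list comes back empty
        // (run_fat returns `vec![]` for it).
        assert!(needs_thin_lto.is_empty());
        B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules, timeline)
    } else {
        // Thin LTO returns the modules that need optimization plus the
        // WorkProducts that can simply be reused from the incr. comp. cache.
        B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules, timeline)
    }
}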