Separate out methods for running thin and fat LTO
parent a17de6980a
commit bdbee6311b
4 changed files with 74 additions and 47 deletions
@@ -48,18 +48,11 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool {
     }
 }
 
-/// Performs LTO, which in the case of full LTO means merging all modules into
-/// a single one and returning it for further optimizing. For ThinLTO, it will
-/// do the global analysis necessary and return two lists, one of the modules
-/// the need optimization and another for modules that can simply be copied over
-/// from the incr. comp. cache.
-pub(crate) fn run(cgcx: &CodegenContext<LlvmCodegenBackend>,
-                  modules: Vec<ModuleCodegen<ModuleLlvm>>,
-                  cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
-                  timeline: &mut Timeline)
-    -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError>
+fn prepare_lto(cgcx: &CodegenContext<LlvmCodegenBackend>,
+               timeline: &mut Timeline,
+               diag_handler: &Handler)
+    -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError>
 {
-    let diag_handler = cgcx.create_diag_handler();
     let export_threshold = match cgcx.lto {
         // We're just doing LTO for our one crate
         Lto::ThinLocal => SymbolExportLevel::Rust,
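`prepare_lto` hands back the symbol white list as owned `CString`s, and the callers below borrow them as raw pointers for the FFI calls into LLVM. A minimal, runnable sketch of that ownership pattern, using placeholder names rather than the real backend's FFI functions:

use std::ffi::CString;
use std::os::raw::c_char;

// Stand-in for an LLVM FFI call that expects an array of C strings.
// In the real backend this would be an `extern "C"` function.
fn consume_symbol_list(ptrs: &[*const c_char]) {
    println!("received {} symbols", ptrs.len());
}

fn main() {
    // The owned, NUL-terminated strings live here...
    let symbol_white_list: Vec<CString> = vec![
        CString::new("main").unwrap(),
        CString::new("rust_eh_personality").unwrap(),
    ];
    // ...and the borrowed raw pointers are only valid while the `CString`s
    // above are alive, which is why the owned vector is kept around for the
    // duration of the LTO step.
    let ptrs: Vec<*const c_char> = symbol_white_list.iter()
        .map(|c| c.as_ptr())
        .collect();
    consume_symbol_list(&ptrs);
}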
@@ -144,36 +137,56 @@ pub(crate) fn run(cgcx: &CodegenContext<LlvmCodegenBackend>,
         }
     }
 
+    Ok((symbol_white_list, upstream_modules))
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(cgcx: &CodegenContext<LlvmCodegenBackend>,
+                      modules: Vec<ModuleCodegen<ModuleLlvm>>,
+                      _cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+                      timeline: &mut Timeline)
+    -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError>
+{
+    let diag_handler = cgcx.create_diag_handler();
+    let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, timeline, &diag_handler)?;
     let symbol_white_list = symbol_white_list.iter()
         .map(|c| c.as_ptr())
         .collect::<Vec<_>>();
-    match cgcx.lto {
-        Lto::Fat => {
-            assert!(cached_modules.is_empty());
-            let opt_jobs = fat_lto(cgcx,
-                                   &diag_handler,
-                                   modules,
-                                   upstream_modules,
-                                   &symbol_white_list,
-                                   timeline);
-            opt_jobs.map(|opt_jobs| (opt_jobs, vec![]))
-        }
-        Lto::Thin |
-        Lto::ThinLocal => {
-            if cgcx.opts.debugging_opts.cross_lang_lto.enabled() {
-                unreachable!("We should never reach this case if the LTO step \
-                              is deferred to the linker");
-            }
-            thin_lto(cgcx,
-                     &diag_handler,
-                     modules,
-                     upstream_modules,
-                     cached_modules,
-                     &symbol_white_list,
-                     timeline)
-        }
-        Lto::No => unreachable!(),
-    }
+    let opt_jobs = fat_lto(cgcx,
+                           &diag_handler,
+                           modules,
+                           upstream_modules,
+                           &symbol_white_list,
+                           timeline);
+    opt_jobs.map(|opt_jobs| (opt_jobs, vec![]))
+}
+
+/// Performs thin LTO by performing necessary global analysis and returning two
+/// lists, one of the modules that need optimization and another for modules that
+/// can simply be copied over from the incr. comp. cache.
+pub(crate) fn run_thin(cgcx: &CodegenContext<LlvmCodegenBackend>,
+                       modules: Vec<ModuleCodegen<ModuleLlvm>>,
+                       cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+                       timeline: &mut Timeline)
+    -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError>
+{
+    let diag_handler = cgcx.create_diag_handler();
+    let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, timeline, &diag_handler)?;
+    let symbol_white_list = symbol_white_list.iter()
+        .map(|c| c.as_ptr())
+        .collect::<Vec<_>>();
+    if cgcx.opts.debugging_opts.cross_lang_lto.enabled() {
+        unreachable!("We should never reach this case if the LTO step \
+                      is deferred to the linker");
+    }
+    thin_lto(cgcx,
+             &diag_handler,
+             modules,
+             upstream_modules,
+             cached_modules,
+             &symbol_white_list,
+             timeline)
 }
 
 fn fat_lto(cgcx: &CodegenContext<LlvmCodegenBackend>,
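The shape of the refactor above: the mode `match` that used to live inside `run` is gone, the shared set-up is factored into a fallible helper, and each LTO flavour gets its own entry point. A simplified, self-contained model of that structure, with placeholder types standing in for the real codegen ones:

// Placeholder stand-ins for the codegen context and a compiled module.
struct Context { thin: bool }
struct Module(String);

// Shared preparation, factored out like `prepare_lto`.
fn prepare(_cx: &Context) -> Result<Vec<String>, String> {
    Ok(vec!["sym_a".to_string(), "sym_b".to_string()])
}

// Fat LTO: merge everything into a single module.
fn run_fat(cx: &Context, modules: Vec<Module>) -> Result<Vec<Module>, String> {
    let _symbols = prepare(cx)?;
    let merged = modules.into_iter()
        .map(|m| m.0)
        .collect::<Vec<_>>()
        .join("+");
    Ok(vec![Module(merged)])
}

// Thin LTO: keep the modules separate and just record the analysis result.
fn run_thin(cx: &Context, modules: Vec<Module>) -> Result<Vec<Module>, String> {
    let _symbols = prepare(cx)?;
    Ok(modules)
}

fn main() -> Result<(), String> {
    let cx = Context { thin: true };
    let modules = vec![Module("a".into()), Module("b".into())];
    // The caller, not the LTO code itself, now decides which variant to run.
    let out = if cx.thin { run_thin(&cx, modules)? } else { run_fat(&cx, modules)? };
    println!("{} output module(s)", out.len());
    Ok(())
}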
@@ -176,13 +176,21 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     fn print_pass_timings(&self) {
         unsafe { llvm::LLVMRustPrintPassTimings(); }
     }
-    fn run_lto(
+    fn run_fat_lto(
         cgcx: &CodegenContext<Self>,
         modules: Vec<ModuleCodegen<Self::Module>>,
         cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
         timeline: &mut Timeline
     ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
-        back::lto::run(cgcx, modules, cached_modules, timeline)
+        back::lto::run_fat(cgcx, modules, cached_modules, timeline)
+    }
+    fn run_thin_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+        timeline: &mut Timeline
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+        back::lto::run_thin(cgcx, modules, cached_modules, timeline)
     }
     unsafe fn optimize(
         cgcx: &CodegenContext<Self>,
@@ -264,11 +264,11 @@ fn generate_lto_work<B: ExtraBackendMethods>(
 
     let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
         assert!(needs_thin_lto.is_empty());
-        B::run_lto(cgcx, needs_fat_lto, import_only_modules, &mut timeline)
+        B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules, &mut timeline)
             .unwrap_or_else(|e| e.raise())
     } else {
         assert!(needs_fat_lto.is_empty());
-        B::run_lto(cgcx, needs_thin_lto, import_only_modules, &mut timeline)
+        B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules, &mut timeline)
             .unwrap_or_else(|e| e.raise())
     };
 
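`generate_lto_work` now names the LTO mode in the call itself, and the surrounding asserts encode that only one of the two work lists may be non-empty at a time. A small, self-contained sketch of that dispatch shape, with stand-in types instead of the real work items:

// Simplified stand-ins for the per-module LTO work items.
struct FatItem;
struct ThinItem;

// Mirrors the dispatch in `generate_lto_work`: exactly one of the two lists
// may be populated, and that decides which LTO entry point runs.
fn generate_lto_work(needs_fat: Vec<FatItem>, needs_thin: Vec<ThinItem>) -> usize {
    if !needs_fat.is_empty() {
        assert!(needs_thin.is_empty());
        // Fat path: everything is merged, so a single job comes back.
        1
    } else {
        assert!(needs_fat.is_empty());
        // Thin path: one optimization job per module.
        needs_thin.len()
    }
}

fn main() {
    let jobs = generate_lto_work(Vec::new(), vec![ThinItem, ThinItem]);
    assert_eq!(jobs, 2);
}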
@@ -24,12 +24,18 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
     type ThinData: Send + Sync;
     type ThinBuffer: ThinBufferMethods;
 
-    /// Performs LTO, which in the case of full LTO means merging all modules into
-    /// a single one and returning it for further optimizing. For ThinLTO, it will
-    /// do the global analysis necessary and return two lists, one of the modules
-    /// the need optimization and another for modules that can simply be copied over
-    /// from the incr. comp. cache.
-    fn run_lto(
+    /// Performs fat LTO by merging all modules into a single one and returning it
+    /// for further optimization.
+    fn run_fat_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+        timeline: &mut Timeline,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
+    /// Performs thin LTO by performing necessary global analysis and returning two
+    /// lists, one of the modules that need optimization and another for modules that
+    /// can simply be copied over from the incr. comp. cache.
+    fn run_thin_lto(
         cgcx: &CodegenContext<Self>,
         modules: Vec<ModuleCodegen<Self::Module>>,
         cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
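With the trait split, a backend implements each LTO flavour directly instead of branching on a mode argument inside a single `run_lto`. A minimal sketch of the resulting trait shape with a dummy backend; all the types and names here are illustrative stand-ins, not the actual rustc_codegen_ssa ones:

// Illustrative stand-ins for the real associated types and error handling.
struct Context;
struct Module(&'static str);
type LtoResult = Result<Vec<Module>, String>;

// The split mirrors `run_fat_lto` / `run_thin_lto` on `WriteBackendMethods`:
// one method per LTO flavour rather than one method that inspects a mode flag.
trait LtoBackend {
    fn run_fat_lto(cx: &Context, modules: Vec<Module>) -> LtoResult;
    fn run_thin_lto(cx: &Context, modules: Vec<Module>) -> LtoResult;
}

struct DummyBackend;

impl LtoBackend for DummyBackend {
    fn run_fat_lto(_cx: &Context, modules: Vec<Module>) -> LtoResult {
        // A real backend would merge the modules here; the dummy keeps the first.
        Ok(modules.into_iter().take(1).collect())
    }
    fn run_thin_lto(_cx: &Context, modules: Vec<Module>) -> LtoResult {
        // A real backend would run the thin-LTO analysis; the dummy passes through.
        Ok(modules)
    }
}

// Backend-agnostic driver code, analogous to `generate_lto_work`, can now name
// the intended mode directly in the call.
fn drive<B: LtoBackend>(cx: &Context, fat: bool, modules: Vec<Module>) -> LtoResult {
    if fat { B::run_fat_lto(cx, modules) } else { B::run_thin_lto(cx, modules) }
}

fn main() {
    let out = drive::<DummyBackend>(&Context, true, vec![Module("a"), Module("b")]).unwrap();
    assert_eq!(out.len(), 1);
}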