use crate::back::write::{self, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers};
use crate::errors::{
    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
};
use crate::llvm::{self, build_string};
use crate::{LlvmCodegenBackend, ModuleLlvm};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{FatalError, Handler};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, Lto};

use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::iter;
use std::path::Path;
use std::slice;
use std::sync::Arc;

/// We keep track of the computed LTO cache keys from the previous
/// session to determine which CGUs we can reuse.
pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";

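/// Whether the given crate type can take part in (non-local) LTO: final
/// artifacts (executables, dylibs, staticlibs and cdylibs) can, while rlibs
/// (intermediate artifacts whose real final link happens later) and proc
/// macros cannot.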
pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
    match crate_type {
        CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
        CrateType::Rlib | CrateType::ProcMacro => false,
    }
}

fn prepare_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
    let export_threshold = match cgcx.lto {
        // We're just doing LTO for our one crate
        Lto::ThinLocal => SymbolExportLevel::Rust,

        // We're doing LTO for the entire crate graph
        Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),

        Lto::No => panic!("didn't request LTO but we're doing LTO"),
    };

    let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
        if info.level.is_below_threshold(export_threshold) || info.used {
            Some(CString::new(name.as_str()).unwrap())
        } else {
            None
        }
    };
    let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
    let mut symbols_below_threshold = {
        let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
        exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
    };
    info!("{} symbols to preserve in this crate", symbols_below_threshold.len());

    // If we're performing LTO for the entire crate graph, then for each of our
    // upstream dependencies, find the corresponding rlib and load the bitcode
    // from the archive.
    //
    // We save off all the bitcode and LLVM module ids for later processing
    // with either fat or thin LTO.
    let mut upstream_modules = Vec::new();
    if cgcx.lto != Lto::ThinLocal {
        // Make sure we actually can run LTO
        for crate_type in cgcx.crate_types.iter() {
            if !crate_type_allows_lto(*crate_type) {
                diag_handler.emit_err(LtoDisallowed);
                return Err(FatalError);
            } else if *crate_type == CrateType::Dylib {
                if !cgcx.opts.unstable_opts.dylib_lto {
                    diag_handler.emit_err(LtoDylib);
                    return Err(FatalError);
                }
            }
        }

        if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
            diag_handler.emit_err(DynamicLinkingWithLTO);
            return Err(FatalError);
        }

        for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
            let exported_symbols =
                cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
            {
                let _timer =
                    cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
                symbols_below_threshold
                    .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
            }

            let archive_data = unsafe {
                Mmap::map(std::fs::File::open(&path).expect("couldn't open rlib"))
                    .expect("couldn't map rlib")
            };
            let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
            let obj_files = archive
                .members()
                .filter_map(|child| {
                    child.ok().and_then(|c| {
                        std::str::from_utf8(c.name()).ok().map(|name| (name.trim(), c))
                    })
                })
                .filter(|&(name, _)| looks_like_rust_object_file(name));
            for (name, child) in obj_files {
                info!("adding bitcode from {}", name);
                match get_bitcode_slice_from_object_data(
                    child.data(&*archive_data).expect("corrupt rlib"),
                ) {
                    Ok(data) => {
                        let module = SerializedModule::FromRlib(data.to_vec());
                        upstream_modules.push((module, CString::new(name).unwrap()));
                    }
                    Err(e) => {
                        diag_handler.emit_err(e);
                        return Err(FatalError);
                    }
                }
            }
        }
    }

    // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
    // __llvm_profile_runtime, therefore we won't know until link time if this symbol
    // should have default visibility.
    symbols_below_threshold.push(CString::new("__llvm_profile_counter_bias").unwrap());
    Ok((symbols_below_threshold, upstream_modules))
}

fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
    let mut len = 0;
    let data =
        unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
    if !data.is_null() {
        assert!(len != 0);
        let bc = unsafe { slice::from_raw_parts(data, len) };

        // `bc` must be a sub-slice of `obj`.
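        // (Indexing with the empty ranges below is a way to obtain the
        // one-past-the-end pointers of `bc` and `obj` without going out of
        // bounds, so the two end pointers can be compared directly.)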
        assert!(obj.as_ptr() <= bc.as_ptr());
        assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr());

        Ok(bc)
    } else {
        assert!(len == 0);
        Err(LtoBitcodeFromRlib {
            llvm_err: llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string()),
        })
    }
}

/// Performs fat LTO by merging all modules into a single one and returning it
/// for further optimization.
pub(crate) fn run_fat(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();
    let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
    let symbols_below_threshold =
        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
    fat_lto(
        cgcx,
        &diag_handler,
        modules,
        cached_modules,
        upstream_modules,
        &symbols_below_threshold,
    )
}

/// Performs thin LTO by performing the necessary global analysis and returning
/// two lists: one of modules that need optimization, and another of modules
/// that can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    modules: Vec<(String, ThinBuffer)>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let diag_handler = cgcx.create_diag_handler();
    let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
    let symbols_below_threshold =
        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
    if cgcx.opts.cg.linker_plugin_lto.enabled() {
        unreachable!(
            "We should never reach this case if the LTO step \
             is deferred to the linker"
        );
    }
    thin_lto(
        cgcx,
        &diag_handler,
        modules,
        upstream_modules,
        cached_modules,
        &symbols_below_threshold,
    )
}

pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
    let name = module.name;
    let buffer = ThinBuffer::new(module.module_llvm.llmod(), true);
    (name, buffer)
}

fn fat_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
    mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
    symbols_below_threshold: &[*const libc::c_char],
) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
    let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
    info!("going for a fat lto");

    // Sort out all our lists of incoming modules into two lists.
    //
    // * `serialized_modules` (also an argument to this function) contains all
    //   modules that are serialized in-memory.
    // * `in_memory` contains modules which are already parsed and in-memory,
    //   such as from multi-CGU builds.
    //
    // All of `cached_modules` (cached from previous incremental builds) can
    // immediately go onto the `serialized_modules` list, and then we can
    // split the `modules` array into these two lists.
    let mut in_memory = Vec::new();
    serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
        info!("pushing cached module {:?}", wp.cgu_name);
        (buffer, CString::new(wp.cgu_name).unwrap())
    }));
    for module in modules {
        match module {
            FatLTOInput::InMemory(m) => in_memory.push(m),
            FatLTOInput::Serialized { name, buffer } => {
                info!("pushing serialized module {:?}", name);
                let buffer = SerializedModule::Local(buffer);
                serialized_modules.push((buffer, CString::new(name).unwrap()));
            }
        }
    }

    // Find the "costliest" module and merge everything into that codegen unit.
    // All the other modules will be serialized and reparsed into the new
    // context, so this hopefully avoids serializing and parsing the largest
    // codegen unit.
    //
    // Additionally use a regular module as the base here to ensure that various
    // file copy operations in the backend work correctly. The only other kind
    // of module here should be an allocator one, and if your crate is smaller
    // than the allocator module then the size doesn't really matter anyway.
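    // Note that `max()` compares the `(cost, i)` tuples lexicographically, so
    // the highest-cost module wins, with ties broken in favor of the later
    // index.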
    let costliest_module = in_memory
        .iter()
        .enumerate()
        .filter(|&(_, module)| module.kind == ModuleKind::Regular)
        .map(|(i, module)| {
            let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
            (cost, i)
        })
        .max();

    // If we found a costliest module, we're good to go. Otherwise all our
    // inputs were serialized, which could happen if, for example, all our
    // inputs were incrementally reread from the cache and we're just
    // re-executing the LTO passes. If that's the case, deserialize the first
    // module and create a linker with it.
    let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
        Some((_cost, i)) => in_memory.remove(i),
        None => {
            assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
            let (buffer, name) = serialized_modules.remove(0);
            info!("no in-memory regular modules to choose from, parsing {:?}", name);
            ModuleCodegen {
                module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), diag_handler)?,
                name: name.into_string().unwrap(),
                kind: ModuleKind::Regular,
            }
        }
    };
    let mut serialized_bitcode = Vec::new();
    {
        let (llcx, llmod) = {
            let llvm = &module.module_llvm;
            (&llvm.llcx, llvm.llmod())
        };
        info!("using {:?} as a base module", module.name);

        // The linking steps below may produce errors and diagnostics within LLVM
        // which we'd like to handle and print, so set up our diagnostic handlers
        // (which get unregistered when they go out of scope below).
        let _handler = DiagnosticHandlers::new(
            cgcx,
            diag_handler,
            llcx,
            &module,
            CodegenDiagnosticsStage::LTO,
        );

        // For all the other modules we codegened, we'll need to link them into
        // our own bitcode. All modules were codegened in their own LLVM context,
        // however, and we want to move everything to the same LLVM context.
        // Currently the way we know of to do that is to serialize them to a
        // string and then parse them later. Not great but hey, that's why it's
        // "fat" LTO, right?
        for module in in_memory {
            let buffer = ModuleBuffer::new(module.module_llvm.llmod());
            let llmod_id = CString::new(&module.name[..]).unwrap();
            serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
        }
        // Sort the modules to ensure we produce deterministic results.
        serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));

        // For all serialized bitcode files, we parse them and link them in as we
        // did above; this is all mostly handled in C++. Like above, though, we
        // don't know much about the memory management here so we err on the side
        // of being safe and persist everything with the original module.
        let mut linker = Linker::new(llmod);
        for (bc_decoded, name) in serialized_modules {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
                    recorder.record_arg(format!("{:?}", name))
                });
            info!("linking {:?}", name);
            let data = bc_decoded.data();
            linker
                .add(data)
                .map_err(|()| write::llvm_err(diag_handler, LlvmError::LoadBitcode { name }))?;
            serialized_bitcode.push(bc_decoded);
        }
        drop(linker);
        save_temp_bitcode(cgcx, &module, "lto.input");

        // Internalize everything below threshold to help strip out more modules and such.
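        // (Everything *not* named in `symbols_below_threshold` is given
        // internal linkage, which is what allows the following LTO passes to
        // inline and dead-code-eliminate far more aggressively.)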
        unsafe {
            let ptr = symbols_below_threshold.as_ptr();
            llvm::LLVMRustRunRestrictionPass(
                llmod,
                ptr as *const *const libc::c_char,
                symbols_below_threshold.len() as libc::size_t,
            );
            save_temp_bitcode(cgcx, &module, "lto.after-restriction");
        }
    }

    Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
}

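/// A minimal safe wrapper around LLVM's bitcode linker, used by `fat_lto`
/// above to merge all other modules into the base module. A sketch of the
/// usage pattern, mirroring the loop in `fat_lto` (illustrative only; `llmod`
/// and `serialized_modules` come from a live LLVM context):
///
/// ```ignore (requires an LLVM context and modules)
/// let mut linker = Linker::new(llmod);
/// for (bc_decoded, _name) in serialized_modules {
///     linker.add(bc_decoded.data()).expect("failed to link module");
/// }
/// drop(linker); // frees the underlying LLVM linker
/// ```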
pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);

impl<'a> Linker<'a> {
    pub(crate) fn new(llmod: &'a llvm::Module) -> Self {
        unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
    }

    pub(crate) fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
        unsafe {
            if llvm::LLVMRustLinkerAdd(
                self.0,
                bytecode.as_ptr() as *const libc::c_char,
                bytecode.len(),
            ) {
                Ok(())
            } else {
                Err(())
            }
        }
    }
}

impl Drop for Linker<'_> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
        }
    }
}

/// Prepare "thin" LTO to get run on these modules.
///
/// The general structure of ThinLTO is quite different from the structure of
/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
/// one giant LLVM module, and then we run more optimization passes over this
/// big module after internalizing most symbols. Thin LTO, on the other hand,
/// avoids this large bottleneck through more targeted optimization.
///
/// At a high level Thin LTO looks like:
///
///    1. Prepare a "summary" of each LLVM module in question which describes
///       the values inside, cost of the values, etc.
///    2. Merge the summaries of all modules in question into one "index"
///    3. Perform some global analysis on this index
///    4. For each module, use the index and analysis calculated previously to
///       perform local transformations on the module, for example inlining
///       small functions from other modules.
///    5. Run thin-specific optimization passes over each module, and then code
///       generate everything at the end.
///
/// The summary for each module is intended to be quite cheap, and the global
/// index is relatively cheap to create as well. As a result, the goal of
/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
/// situations. For example one cheap optimization is that we can parallelize
/// all codegen modules, easily making use of all the cores on a machine.
///
/// With all that in mind, the function here is designed to specifically just
/// calculate the *index* for ThinLTO. This index will then be shared amongst
/// all of the `LtoModuleCodegen` units returned below and destroyed once
/// they all go out of scope.
fn thin_lto(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    modules: Vec<(String, ThinBuffer)>,
    serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
    symbols_below_threshold: &[*const libc::c_char],
) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
    let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
    unsafe {
        info!("going for that thin, thin LTO");

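        // A module is "green" when the incremental dep-graph found its inputs
        // unchanged since the previous session, making it a candidate for
        // being copied over (post-LTO reuse) instead of re-optimized.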
        let green_modules: FxHashMap<_, _> =
            cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();

        let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
        let mut thin_buffers = Vec::with_capacity(modules.len());
        let mut module_names = Vec::with_capacity(full_scope_len);
        let mut thin_modules = Vec::with_capacity(full_scope_len);

        for (i, (name, buffer)) in modules.into_iter().enumerate() {
            info!("local module: {} - {}", i, name);
            let cname = CString::new(name.clone()).unwrap();
            thin_modules.push(llvm::ThinLTOModule {
                identifier: cname.as_ptr(),
                data: buffer.data().as_ptr(),
                len: buffer.data().len(),
            });
            thin_buffers.push(buffer);
            module_names.push(cname);
        }

        // FIXME: All upstream crates are deserialized internally in the
        //        function below to extract their summary and modules. Note that
        //        unlike the loop above we *must* decode and/or read something
        //        here as these are all just serialized files on disk. An
        //        improvement, however, to make here would be to store the
        //        module summary separately from the actual module itself. Right
        //        now this is stored in one large bitcode file, and the entire
        //        file is deflate-compressed. We could try to bypass some of the
        //        decompression by storing the index uncompressed and only
        //        lazily decompressing the bitcode if necessary.
        //
        //        Note that truly taking advantage of this optimization will
        //        likely be further down the road. We'd have to implement
        //        incremental ThinLTO first, where we could actually sometimes
        //        avoid looking at upstream modules entirely (their contents,
        //        that is; we must always unconditionally look at the index).
        let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());

        let cached_modules =
            cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));

        for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
            info!("upstream or cached module {:?}", name);
            thin_modules.push(llvm::ThinLTOModule {
                identifier: name.as_ptr(),
                data: module.data().as_ptr(),
                len: module.data().len(),
            });
            serialized.push(module);
            module_names.push(name);
        }

        // Sanity check
        assert_eq!(thin_modules.len(), module_names.len());

        // Delegate to the C++ bindings to create some data here. Once this is a
        // tried-and-true interface we may wish to try to upstream some of this
        // to LLVM itself, right now we reimplement a lot of what they do
        // upstream...
        let data = llvm::LLVMRustCreateThinLTOData(
            thin_modules.as_ptr(),
            thin_modules.len() as u32,
            symbols_below_threshold.as_ptr(),
            symbols_below_threshold.len() as u32,
        )
        .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::PrepareThinLtoContext))?;

        let data = ThinData(data);

        info!("thin LTO data created");

        let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
            cgcx.incr_comp_session_dir
        {
            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
            // If the previous file was deleted, or we get an IO error
            // reading the file, then we'll just use `None` as the
            // prev_key_map, which will force the code to be recompiled.
            let prev =
                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
            (Some(path), prev, curr)
        } else {
            // If we don't compile incrementally, we don't need to load the
            // import data from LLVM.
            assert!(green_modules.is_empty());
            let curr = ThinLTOKeysMap::default();
            (None, None, curr)
        };
        info!("thin LTO cache key map loaded");
        info!("prev_key_map: {:#?}", prev_key_map);
        info!("curr_key_map: {:#?}", curr_key_map);

        // Throw our data in an `Arc` as we'll be sharing it across threads. We
        // also put all memory referenced by the C++ data (buffers, ids, etc)
        // into the arc as well. After this we'll create a thin module
        // codegen per module in this data.
        let shared = Arc::new(ThinShared {
            data,
            thin_buffers,
            serialized_modules: serialized,
            module_names,
        });

        let mut copy_jobs = vec![];
        let mut opt_jobs = vec![];

        info!("checking which modules can be re-used and which have to be re-optimized.");
        for (module_index, module_name) in shared.module_names.iter().enumerate() {
            let module_name = module_name_to_str(module_name);
            if let (Some(prev_key_map), true) =
                (prev_key_map.as_ref(), green_modules.contains_key(module_name))
            {
                assert!(cgcx.incr_comp_session_dir.is_some());

                // If a module exists in both the current and the previous session,
                // and has the same LTO cache key in both sessions, then we can re-use it
                if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
                    let work_product = green_modules[module_name].clone();
                    copy_jobs.push(work_product);
                    info!(" - {}: re-used", module_name);
                    assert!(cgcx.incr_comp_session_dir.is_some());
                    cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
                    continue;
                }
            }

            info!(" - {}: re-compiled", module_name);
            opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
                shared: shared.clone(),
                idx: module_index,
            }));
        }

        // Save the current ThinLTO import information for the next compilation
        // session, overwriting the previous serialized data (if any).
        if let Some(path) = key_map_path {
            if let Err(err) = curr_key_map.save_to_file(&path) {
                return Err(write::llvm_err(diag_handler, LlvmError::WriteThinLtoKey { err }));
            }
        }

        Ok((opt_jobs, copy_jobs))
    }
}

pub(crate) fn run_pass_manager(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    diag_handler: &Handler,
    module: &mut ModuleCodegen<ModuleLlvm>,
    thin: bool,
) -> Result<(), FatalError> {
    let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
    let config = cgcx.config(module.kind);

    // Now we have one massive module inside of llmod. Time to run the
    // LTO-specific optimization passes that LLVM provides.
    //
    // This code is based off the code found in llvm's LTO code generator:
    //      llvm/lib/LTO/LTOCodeGenerator.cpp
    debug!("running the pass manager");
    unsafe {
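        // The explicit `11` below is the byte length of the (unterminated)
        // string "LTOPostLink" passed just above it.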
        if !llvm::LLVMRustHasModuleFlag(
            module.module_llvm.llmod(),
            "LTOPostLink".as_ptr().cast(),
            11,
        ) {
            llvm::LLVMRustAddModuleFlag(
                module.module_llvm.llmod(),
                llvm::LLVMModFlagBehavior::Error,
                "LTOPostLink\0".as_ptr().cast(),
                1,
            );
        }
        let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
        let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
        write::llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage)?;
    }
    debug!("lto done");
    Ok(())
}

pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);

unsafe impl Send for ModuleBuffer {}
unsafe impl Sync for ModuleBuffer {}

impl ModuleBuffer {
    pub fn new(m: &llvm::Module) -> ModuleBuffer {
        ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
    }
}

impl ModuleBufferMethods for ModuleBuffer {
    fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
            let len = llvm::LLVMRustModuleBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ModuleBuffer {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
        }
    }
}

pub struct ThinData(&'static mut llvm::ThinLTOData);

unsafe impl Send for ThinData {}
unsafe impl Sync for ThinData {}

impl Drop for ThinData {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
        }
    }
}

pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);

unsafe impl Send for ThinBuffer {}
unsafe impl Sync for ThinBuffer {}

impl ThinBuffer {
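    /// Creates a buffer holding the bitcode of `m`. When `is_thin` is true
    /// (as in `prepare_thin` above), the buffer is written as ThinLTO
    /// bitcode, i.e. presumably including the per-module summary that the
    /// thin link in `thin_lto` consumes.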
    pub fn new(m: &llvm::Module, is_thin: bool) -> ThinBuffer {
        unsafe {
            let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin);
            ThinBuffer(buffer)
        }
    }
}

impl ThinBufferMethods for ThinBuffer {
    fn data(&self) -> &[u8] {
        unsafe {
            let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
            let len = llvm::LLVMRustThinLTOBufferLen(self.0);
            slice::from_raw_parts(ptr, len)
        }
    }
}

impl Drop for ThinBuffer {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
        }
    }
}

pub unsafe fn optimize_thin_module(
    thin_module: ThinModule<LlvmCodegenBackend>,
    cgcx: &CodegenContext<LlvmCodegenBackend>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
    let diag_handler = cgcx.create_diag_handler();

    let module_name = &thin_module.shared.module_names[thin_module.idx];
    let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
    let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, e))?;

    // Right now the implementation we've got only works over serialized
    // modules, so we create a fresh new LLVM context and parse the module
    // into that context. One day, however, we may do this for upstream
    // crates, but for locally codegened modules we may be able to reuse
    // that LLVM Context and Module.
    let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
    let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
    let mut module = ModuleCodegen {
        module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
        name: thin_module.name().to_string(),
        kind: ModuleKind::Regular,
    };
    {
        let target = &*module.module_llvm.tm;
        let llmod = module.module_llvm.llmod();
        save_temp_bitcode(cgcx, &module, "thin-lto-input");

        // Up next come the per-module local analyses that we do for Thin LTO.
        // Each of these functions is basically copied from the LLVM
        // implementation and then tailored to suit this implementation. Ideally
        // each of these would be supported by upstream LLVM but that's perhaps
        // a patch for another day!
        //
        // You can find some more comments about these functions in the LLVM
        // bindings we've got (currently `PassWrapper.cpp`).
        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
        }

        {
            let _timer = cgcx
                .prof
                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
        }

        {
            let _timer =
                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
            if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
                return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
            }
            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
        }

        // Alright, now that we've done everything related to the ThinLTO
        // analysis it's time to run some optimizations! Here we use the same
        // `run_pass_manager` as the "fat" LTO above except that we tell it to
        // populate a thin-specific pass manager, which presumably LLVM treats a
        // little differently.
        {
            info!("running thin lto passes over {}", module.name);
            run_pass_manager(cgcx, &diag_handler, &mut module, true)?;
            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
        }
    }
    Ok(module)
}

/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
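///
/// Serialized as one space-separated `<module-name> <cache-key>` pair per
/// line (see `save_to_file`/`load_from_file` below). Hypothetical example
/// contents:
///
/// ```text
/// foo.4e0b0f2c5a6d8b31-cgu.0 1d4f9a37c2e8b056
/// foo.4e0b0f2c5a6d8b31-cgu.1 83b7fa6c09d1e245
/// ```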
#[derive(Debug, Default)]
pub struct ThinLTOKeysMap {
    // key = llvm name of importing module, value = LLVM cache key
    keys: FxHashMap<String, String>,
}

impl ThinLTOKeysMap {
    fn save_to_file(&self, path: &Path) -> io::Result<()> {
        use std::io::Write;
        let file = File::create(path)?;
        let mut writer = io::BufWriter::new(file);
        for (module, key) in &self.keys {
            writeln!(writer, "{} {}", module, key)?;
        }
        Ok(())
    }

    fn load_from_file(path: &Path) -> io::Result<Self> {
        use std::io::BufRead;
        let mut keys = FxHashMap::default();
        let file = File::open(path)?;
        for line in io::BufReader::new(file).lines() {
            let line = line?;
            let mut split = line.split(' ');
            let module = split.next().unwrap();
            let key = split.next().unwrap();
            assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
            keys.insert(module.to_string(), key.to_string());
        }
        Ok(Self { keys })
    }

    fn from_thin_lto_modules(
        data: &ThinData,
        modules: &[llvm::ThinLTOModule],
        names: &[CString],
    ) -> Self {
        let keys = iter::zip(modules, names)
            .map(|(module, name)| {
                let key = build_string(|rust_str| unsafe {
                    llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
                })
                .expect("Invalid ThinLTO module key");
                (name.clone().into_string().unwrap(), key)
            })
            .collect();
        Self { keys }
    }
}

fn module_name_to_str(c_str: &CStr) -> &str {
    c_str.to_str().unwrap_or_else(|e| {
        bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
    })
}

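/// Parses `data` as bitcode into a fresh module in the LLVM context `cx`,
/// reporting any LLVM error through `diag_handler`. `optimize_thin_module`
/// above uses this to materialize each serialized thin module before
/// optimizing it.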
pub fn parse_module<'a>(
    cx: &'a llvm::Context,
    name: &CStr,
    data: &[u8],
    diag_handler: &Handler,
) -> Result<&'a llvm::Module, FatalError> {
    unsafe {
        llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr())
            .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::ParseBitcode))
    }
}