use crate::attributes;
use crate::back::write::to_llvm_code_model;
use crate::callee::get_fn;
use crate::coverageinfo;
use crate::debuginfo;
use crate::llvm;
use crate::llvm_util;
use crate::type_::Type;
use crate::value::Value;

use cstr::cstr;
use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::mono::CodegenUnit;
use rustc_middle::ty::layout::{
    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
    TyAndLayout,
};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
use rustc_session::Session;
use rustc_span::source_map::Span;
use rustc_span::source_map::Spanned;
use rustc_target::abi::{
    call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
};
use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
use smallvec::SmallVec;

use std::cell::{Cell, RefCell};
use std::ffi::CStr;
use std::str;

/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
/// `llvm::Context` so that several compilation units may be optimized in parallel.
/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
pub struct CodegenCx<'ll, 'tcx> {
    pub tcx: TyCtxt<'tcx>,
    pub check_overflow: bool,
    pub use_dll_storage_attrs: bool,
    pub tls_model: llvm::ThreadLocalMode,

    pub llmod: &'ll llvm::Module,
    pub llcx: &'ll llvm::Context,
    pub codegen_unit: &'tcx CodegenUnit<'tcx>,

    /// Cache instances of monomorphic and polymorphic items
    pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
    /// Cache generated vtables
    pub vtables:
        RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
    /// Cache of constant strings.
    pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,

    /// Reverse-direction for const ptrs cast from globals.
    ///
    /// Key is a Value holding a `*T`,
    /// Val is a Value holding a `*[T]`.
    ///
    /// Needed because LLVM loses pointer->pointee association
    /// when we ptrcast, and we have to ptrcast during codegen
    /// of a `[T]` const because we form a slice, a `(*T, usize)` pair, not
    /// a pointer to an LLVM array type. Similar for trait objects.
    pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,

    /// Cache of emitted const globals (value -> global)
    pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,

    /// List of globals for static variables which need to be passed to the
    /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
    /// (We have to make sure we don't invalidate any Values referring
    /// to constants.)
    pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,

    /// Statics that will be placed in the llvm.used variable
    /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
    pub used_statics: RefCell<Vec<&'ll Value>>,

    /// Statics that will be placed in the llvm.compiler.used variable
    /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
    pub compiler_used_statics: RefCell<Vec<&'ll Value>>,

    /// Mapping of non-scalar types to llvm types and field remapping if needed.
    pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,

    /// Mapping of scalar types to llvm types.
    pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,

    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
    pub isize_ty: &'ll Type,

    pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
    pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,

    eh_personality: Cell<Option<&'ll Value>>,
    eh_catch_typeinfo: Cell<Option<&'ll Value>>,
    pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,

    intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,

    /// A counter that is used for generating local symbol names
    local_gen_sym_counter: Cell<usize>,

    /// `codegen_static` will sometimes create a second global variable with a
    /// different type and clear the symbol name of the original global.
    /// `global_asm!` needs to be able to find this new global so that it can
    /// compute the correct mangled symbol name to insert into the asm.
    pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
}

pub struct TypeLowering<'ll> {
    /// Associated LLVM type
    pub lltype: &'ll Type,

    /// If padding is used the slice maps fields from source order
    /// to llvm order.
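    ///
    /// Illustrative sketch (a hypothetical layout, not taken from real output): if a
    /// `repr(C)` struct with fields `(u8, u32)` were lowered to the LLVM struct
    /// `{ i8, [3 x i8], i32 }` with an explicit padding field, the remapping would be
    /// `[0, 2]`: source field 0 stays at LLVM field 0, and source field 1 maps to
    /// LLVM field 2, skipping the padding filler.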
    pub field_remapping: Option<SmallVec<[u32; 4]>>,
}

fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
    match tls_model {
        TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
        TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
        TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
        TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
    }
}

pub unsafe fn create_module<'ll>(
    tcx: TyCtxt<'_>,
    llcx: &'ll llvm::Context,
    mod_name: &str,
) -> &'ll llvm::Module {
    let sess = tcx.sess;
    let mod_name = SmallCStr::new(mod_name);
    let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);

    let mut target_data_layout = sess.target.data_layout.to_string();
    let llvm_version = llvm_util::get_version();
    if llvm_version < (16, 0, 0) {
        if sess.target.arch == "s390x" {
            // LLVM 16 data layout changed to always set 64-bit vector alignment,
            // which is conditional in earlier LLVM versions.
            // See https://reviews.llvm.org/D131158 for the discussion.
            target_data_layout = target_data_layout.replace("-v128:64", "");
        } else if sess.target.arch == "riscv64" {
            // LLVM 16 introduced this change so as to produce more efficient code.
            // See https://reviews.llvm.org/D116735 for the discussion.
            target_data_layout = target_data_layout.replace("-n32:64-", "-n64-");
        }
    }

    // Ensure that the hardcoded data-layout values remain the defaults.
    if sess.target.is_builtin {
        let tm = crate::back::write::create_informational_target_machine(tcx.sess);
        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
        llvm::LLVMRustDisposeTargetMachine(tm);

        let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
        let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
            .expect("got a non-UTF8 data-layout from LLVM");

        // Unfortunately LLVM target specs change over time, and right now we
        // don't have proper support for working with any data layout other
        // than the one hardcoded in the rust-lang/rust repo. If this compiler
        // is configured against a custom LLVM, we may have a differing data
        // layout, even though we should update our own to use that one.
        //
        // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
        // disable this check entirely, as we may be configured with something
        // that has a different target layout.
        //
        // It's unclear whether this will actually cause breakage when rustc is
        // configured this way.
        //
        // FIXME(#34960)
        let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
        let custom_llvm_used = !cfg_llvm_root.trim().is_empty();

        if !custom_llvm_used && target_data_layout != llvm_data_layout {
            bug!(
                "data-layout for target `{rustc_target}`, `{rustc_layout}`, \
                 differs from LLVM target's `{llvm_target}` default layout, `{llvm_layout}`",
                rustc_target = sess.opts.target_triple,
                rustc_layout = target_data_layout,
                llvm_target = sess.target.llvm_target,
                llvm_layout = llvm_data_layout
            );
        }
    }

    let data_layout = SmallCStr::new(&target_data_layout);
    llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());

    let llvm_target = SmallCStr::new(&sess.target.llvm_target);
    llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());

    let reloc_model = sess.relocation_model();
    if matches!(reloc_model, RelocModel::Pic | RelocModel::Pie) {
        llvm::LLVMRustSetModulePICLevel(llmod);
        // PIE is potentially more effective than PIC, but can only be used in executables.
        // If all our outputs are executables, then we can relax PIC to PIE.
        if reloc_model == RelocModel::Pie
            || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable)
        {
            llvm::LLVMRustSetModulePIELevel(llmod);
        }
    }

    // Linking object files with different code models is undefined behavior
    // because the compiler would have to generate additional code (to span
    // longer jumps) if a larger code model is used with a smaller one.
    //
    // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
    llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));

    // If skipping the PLT is enabled, we need to add some module metadata
    // to ensure intrinsic calls don't use it.
    if !sess.needs_plt() {
        let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
    }

    // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
    if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
        let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast();
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Override,
            canonical_jump_tables,
            1,
        );
    }

    // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.)
    if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
        let enable_split_lto_unit = "EnableSplitLTOUnit\0".as_ptr().cast();
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Override,
            enable_split_lto_unit,
            1,
        );
    }

    // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
    if sess.is_sanitizer_kcfi_enabled() {
        let kcfi = "kcfi\0".as_ptr().cast();
        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
    }

    // Control Flow Guard is currently only supported by the MSVC linker on Windows.
    if sess.target.is_like_msvc {
        match sess.opts.cg.control_flow_guard {
            CFGuard::Disabled => {}
            CFGuard::NoChecks => {
                // Set `cfguard=1` module flag to emit metadata only.
                llvm::LLVMRustAddModuleFlag(
                    llmod,
                    llvm::LLVMModFlagBehavior::Warning,
                    "cfguard\0".as_ptr() as *const _,
                    1,
                )
            }
            CFGuard::Checks => {
                // Set `cfguard=2` module flag to emit metadata and checks.
                llvm::LLVMRustAddModuleFlag(
                    llmod,
                    llvm::LLVMModFlagBehavior::Warning,
                    "cfguard\0".as_ptr() as *const _,
                    2,
                )
            }
        }
    }

    if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
        let behavior = if llvm_version >= (15, 0, 0) {
            llvm::LLVMModFlagBehavior::Min
        } else {
            llvm::LLVMModFlagBehavior::Error
        };

        if sess.target.arch == "aarch64" {
            llvm::LLVMRustAddModuleFlag(
                llmod,
                behavior,
                "branch-target-enforcement\0".as_ptr().cast(),
                bti.into(),
            );
            llvm::LLVMRustAddModuleFlag(
                llmod,
                behavior,
                "sign-return-address\0".as_ptr().cast(),
                pac_ret.is_some().into(),
            );
            let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
            llvm::LLVMRustAddModuleFlag(
                llmod,
                behavior,
                "sign-return-address-all\0".as_ptr().cast(),
                pac_opts.leaf.into(),
            );
            llvm::LLVMRustAddModuleFlag(
                llmod,
                behavior,
                "sign-return-address-with-bkey\0".as_ptr().cast(),
                u32::from(pac_opts.key == PAuthKey::B),
            );
        } else {
            bug!(
                "branch-protection used on non-AArch64 target; \
                 this should be checked in rustc_session."
            );
        }
    }

    // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
    if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Override,
            "cf-protection-branch\0".as_ptr().cast(),
            1,
        )
    }
    if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Override,
            "cf-protection-return\0".as_ptr().cast(),
            1,
        )
    }

    if sess.opts.unstable_opts.virtual_function_elimination {
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Error,
            "Virtual Function Elim\0".as_ptr().cast(),
            1,
        );
    }

    llmod
}

impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
    pub(crate) fn new(
        tcx: TyCtxt<'tcx>,
        codegen_unit: &'tcx CodegenUnit<'tcx>,
        llvm_module: &'ll crate::ModuleLlvm,
    ) -> Self {
        // An interesting part of Windows which MSVC forces our hand on (and
        // apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
        // attributes in LLVM IR as well as native dependencies (in C these
        // correspond to `__declspec(dllimport)`).
        //
        // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
        // relying on that can result in issues like #50176.
        // LLD won't support that and expects symbols with proper attributes.
        // Because of that we make the MinGW target emit dllexport just like MSVC.
        // When it comes to dllimport we use it for constants, but for functions
        // we rely on the linker to do the right thing. Opposed to dllexport this
        // task is easy for them (both LD and LLD) and allows us to easily use
        // symbols from static libraries in shared libraries.
        //
        // Whenever a dynamic library is built on Windows it must have its public
        // interface specified by functions tagged with `dllexport`, or otherwise
        // they're not available to be linked against. This poses a few problems
        // for the compiler, some of which are somewhat fundamental, but we use
        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
        // attribute to all LLVM functions that are exported (e.g., they're
        // already tagged with external linkage). This is suboptimal for a few
        // reasons:
        //
        // * If an object file will never be included in a dynamic library,
        //   there's no need to attach the dllexport attribute. Most object
        //   files in Rust are not destined to become part of a dll as binaries
        //   are statically linked by default.
        // * If the compiler is emitting both an rlib and a dylib, the same
        //   source object file is currently used but with MSVC this may be less
        //   feasible. The compiler may be able to get around this, but it may
        //   involve some invasive changes.
        //
        // The flip side of this situation is that whenever you link to a dll and
        // you import a function from it, the import should be tagged with
        // `dllimport`. At this time, however, the compiler does not emit
        // `dllimport` for any declarations other than constants (where it is
        // required), which is again suboptimal for even more reasons!
        //
        // * Calling a function imported from another dll without using
        //   `dllimport` causes the linker/compiler to have extra overhead (one
        //   `jmp` instruction on x86) when calling the function.
        // * The same object file may be used in different circumstances, so a
        //   function may be imported from a dll if the object is linked into a
        //   dll, but it may be just linked against if linked into an rlib.
        // * The compiler has no knowledge about whether native functions should
        //   be tagged dllimport or not.
        //
        // For now the compiler takes the perf hit (I do not have any numbers to
        // this effect) by marking very little as `dllimport` and praying the
        // linker will take care of everything. Fixing this problem will likely
        // require adding a few attributes to Rust itself (feature gated at the
        // start) and then strongly recommending static linkage on Windows!
        let use_dll_storage_attrs = tcx.sess.target.is_like_windows;

        let check_overflow = tcx.sess.overflow_checks();

        let tls_model = to_llvm_tls_model(tcx.sess.tls_model());

        let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());

        let coverage_cx =
            tcx.sess.instrument_coverage().then(coverageinfo::CrateCoverageContext::new);

        let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
            let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
            debuginfo::metadata::build_compile_unit_di_node(
                tcx,
                codegen_unit.name().as_str(),
                &dctx,
            );
            Some(dctx)
        } else {
            None
        };

        let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());

        CodegenCx {
            tcx,
            check_overflow,
            use_dll_storage_attrs,
            tls_model,
            llmod,
            llcx,
            codegen_unit,
            instances: Default::default(),
            vtables: Default::default(),
            const_str_cache: Default::default(),
            const_unsized: Default::default(),
            const_globals: Default::default(),
            statics_to_rauw: RefCell::new(Vec::new()),
            used_statics: RefCell::new(Vec::new()),
            compiler_used_statics: RefCell::new(Vec::new()),
            type_lowering: Default::default(),
            scalar_lltypes: Default::default(),
            pointee_infos: Default::default(),
            isize_ty,
            coverage_cx,
            dbg_cx,
            eh_personality: Cell::new(None),
            eh_catch_typeinfo: Cell::new(None),
            rust_try_fn: Cell::new(None),
            intrinsics: Default::default(),
            local_gen_sym_counter: Cell::new(0),
            renamed_statics: Default::default(),
        }
    }

    pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
        &self.statics_to_rauw
    }

    #[inline]
    pub fn coverage_context(&self) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
        self.coverage_cx.as_ref()
    }

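    /// Creates an appending global (e.g. `llvm.used` or `llvm.compiler.used`) in the
    /// `llvm.metadata` section from the given values. As a rough, illustrative sketch
    /// (the exact pointer types and casts depend on the target and LLVM version, and
    /// `@FOO`/`@BAR` are placeholder symbols), the emitted IR looks like:
    ///
    /// ```text
    /// @llvm.used = appending global [2 x i8*] [i8* @FOO, i8* @BAR], section "llvm.metadata"
    /// ```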
    pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
        let section = cstr!("llvm.metadata");
        let array = self.const_array(self.type_ptr_to(self.type_i8()), values);

        unsafe {
            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
            llvm::LLVMSetInitializer(g, array);
            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
            llvm::LLVMSetSection(g, section.as_ptr());
        }
    }
}

impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn vtables(
        &self,
    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>
    {
        &self.vtables
    }

    fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
        get_fn(self, instance)
    }

    fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
        get_fn(self, instance)
    }

    fn eh_personality(&self) -> &'ll Value {
        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to codegen that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function; we just make it a symbol
        // that LLVM can later use.
        //
        // Note that MSVC is a little special here in that we don't use the
        // `eh_personality` lang item at all. Currently LLVM has support for
        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
        // *name of the personality function* to decide what kind of unwind side
        // tables/landing pads to emit. It looks like Dwarf is used by default,
        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
        // an "exception", but for MSVC we want to force SEH. This means that we
        // can't actually have the personality function be our standard
        // `rust_eh_personality` function, but rather we wire it up to the
        // CRT's custom personality function, which forces LLVM to consider
        // landing pads as "landing pads for SEH".
        if let Some(llpersonality) = self.eh_personality.get() {
            return llpersonality;
        }

        let name = if wants_msvc_seh(self.sess()) {
            Some("__CxxFrameHandler3")
        } else if wants_wasm_eh(self.sess()) {
            // LLVM specifically tests for the name of the personality function.
            // There is no need for this function to exist anywhere; it will
            // not be called. However, its name has to be "__gxx_wasm_personality_v0"
            // for native wasm exceptions.
            Some("__gxx_wasm_personality_v0")
        } else {
            None
        };

        let tcx = self.tcx;
        let llfn = match tcx.lang_items().eh_personality() {
            Some(def_id) if name.is_none() => self.get_fn_addr(
                ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
                    .unwrap()
                    .unwrap(),
            ),
            _ => {
                let name = name.unwrap_or("rust_eh_personality");
                if let Some(llfn) = self.get_declared_value(name) {
                    llfn
                } else {
                    let fty = self.type_variadic_func(&[], self.type_i32());
                    let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
                    let target_cpu = attributes::target_cpu_attr(self);
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
                    llfn
                }
            }
        };
        self.eh_personality.set(Some(llfn));
        llfn
    }

    fn sess(&self) -> &Session {
        self.tcx.sess
    }

    fn check_overflow(&self) -> bool {
        self.check_overflow
    }

    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
        self.codegen_unit
    }

    fn set_frame_pointer_type(&self, llfn: &'ll Value) {
        if let Some(attr) = attributes::frame_pointer_type_attr(self) {
            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
        }
    }

    fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
        let mut attrs = SmallVec::<[_; 2]>::new();
        attrs.push(attributes::target_cpu_attr(self));
        attrs.extend(attributes::tune_cpu_attr(self));
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
    }

    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
        let entry_name = self.sess().target.entry_name.as_ref();
        if self.get_declared_value(entry_name).is_none() {
            Some(self.declare_entry_fn(
                entry_name,
                self.sess().target.entry_abi.into(),
                llvm::UnnamedAddr::Global,
                fn_type,
            ))
        } else {
            // If the symbol already exists, it is an error: for example, the user wrote
            // #[no_mangle] extern "C" fn main(..) {..}
            // instead of #[start]
            None
        }
    }
}

impl<'ll> CodegenCx<'ll, '_> {
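    /// Looks up a known LLVM intrinsic by name, declaring it on first use and caching
    /// the resulting `(function type, function value)` pair. A minimal usage sketch
    /// (hypothetical call site, not taken from this crate):
    ///
    /// ```ignore (illustrative)
    /// let (fn_ty, llfn) = cx.get_intrinsic("llvm.trap");
    /// ```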
    pub(crate) fn get_intrinsic(&self, key: &str) -> (&'ll Type, &'ll Value) {
        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
            return v;
        }

        self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
    }

    fn insert_intrinsic(
        &self,
        name: &'static str,
        args: Option<&[&'ll llvm::Type]>,
        ret: &'ll llvm::Type,
    ) -> (&'ll llvm::Type, &'ll llvm::Value) {
        let fn_ty = if let Some(args) = args {
            self.type_func(args, ret)
        } else {
            self.type_variadic_func(&[], ret)
        };
        let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
        self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
        (fn_ty, f)
    }

    fn declare_intrinsic(&self, key: &str) -> Option<(&'ll Type, &'ll Value)> {
        macro_rules! ifn {
            ($name:expr, fn() -> $ret:expr) => (
                if key == $name {
                    return Some(self.insert_intrinsic($name, Some(&[]), $ret));
                }
            );
            ($name:expr, fn(...) -> $ret:expr) => (
                if key == $name {
                    return Some(self.insert_intrinsic($name, None, $ret));
                }
            );
            ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
                if key == $name {
                    return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
                }
            );
        }
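        // For reference, an invocation such as `ifn!("llvm.trap", fn() -> void);`
        // expands (roughly) to:
        //
        //     if key == "llvm.trap" {
        //         return Some(self.insert_intrinsic("llvm.trap", Some(&[]), void));
        //     }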
        macro_rules! mk_struct {
            ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
        }

        let i8p = self.type_i8p();
        let void = self.type_void();
        let i1 = self.type_i1();
        let t_i8 = self.type_i8();
        let t_i16 = self.type_i16();
        let t_i32 = self.type_i32();
        let t_i64 = self.type_i64();
        let t_i128 = self.type_i128();
        let t_isize = self.type_isize();
        let t_f32 = self.type_f32();
        let t_f64 = self.type_f64();
        let t_metadata = self.type_metadata();
        let t_token = self.type_token();

        ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p);
        ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);

        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);

        ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
        ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
        ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
        ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
        ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
        ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);

        ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
        ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
        ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
        ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
        ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
        ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
        ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
        ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
        ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
        ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);

        ifn!("llvm.trap", fn() -> void);
        ifn!("llvm.debugtrap", fn() -> void);
        ifn!("llvm.frameaddress", fn(t_i32) -> i8p);

        ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
        ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);

        ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);

        ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
        ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);

        ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
        ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);

        ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
        ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);

        ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.round.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.roundeven.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.roundeven.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
        ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
        ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);

        ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
        ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);

        ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
        ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
        ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
        ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
        ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);

        ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
        ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
        ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
        ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
        ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);

        ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);

        ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
        ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
        ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
        ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
        ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);

        ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
        ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
        ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
        ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
        ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);

        ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
        ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
        ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
        ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
        ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);

        ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
        ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
        ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
        ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
        ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});

        ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
        ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
        ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
        ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
        ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);

        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);

        ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
        ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
        ifn!("llvm.localescape", fn(...) -> void);
        ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
        ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);

        ifn!("llvm.assume", fn(i1) -> void);
        ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);

        // This isn't an "LLVM intrinsic", but LLVM's optimization passes
        // recognize it like one and we assume it exists in `core::slice::cmp`
        match self.sess().target.arch.as_ref() {
            "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
            _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
        }

        // variadic intrinsics
        ifn!("llvm.va_start", fn(i8p) -> void);
        ifn!("llvm.va_end", fn(i8p) -> void);
        ifn!("llvm.va_copy", fn(i8p, i8p) -> void);

        if self.sess().instrument_coverage() {
            ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
        }

        ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
        ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});

        if self.sess().opts.debuginfo != DebugInfo::None {
            ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
            ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
        }

        ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p);

        None
    }

    pub(crate) fn eh_catch_typeinfo(&self) -> &'ll Value {
        if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
            return eh_catch_typeinfo;
        }
        let tcx = self.tcx;
        assert!(self.sess().target.os == "emscripten");
        let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
            Some(def_id) => self.get_static(def_id),
            _ => {
                let ty = self
                    .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
                self.declare_global("rust_eh_catch_typeinfo", ty)
            }
        };
        let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
        self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
        eh_catch_typeinfo
    }
}

impl CodegenCx<'_, '_> {
    /// Generates a new symbol name with the given prefix. This symbol name must
    /// only be used for definitions with `internal` or `private` linkage.
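    ///
    /// A rough sketch of the output shape (the suffix is the current value of a
    /// per-context counter, rendered in an alphanumeric base):
    ///
    /// ```ignore (illustrative)
    /// let name = cx.generate_local_symbol_name("str");
    /// // e.g. "str.0", then "str.1", and so on
    /// ```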
    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
        let idx = self.local_gen_sym_counter.get();
        self.local_gen_sym_counter.set(idx + 1);
        // Include a '.' character so there can be no accidental conflicts with
        // user-defined names.
        let mut name = String::with_capacity(prefix.len() + 6);
        name.push_str(prefix);
        name.push('.');
        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
        name
    }
}

impl HasDataLayout for CodegenCx<'_, '_> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl HasTargetSpec for CodegenCx<'_, '_> {
    #[inline]
    fn target_spec(&self) -> &Target {
        &self.tcx.sess.target
    }
}

impl<'tcx> ty::layout::HasTyCtxt<'tcx> for CodegenCx<'_, 'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
}

impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        ty::ParamEnv::reveal_all()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
    type LayoutOfResult = TyAndLayout<'tcx>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        if let LayoutError::SizeOverflow(_) = err {
            self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
        } else {
            span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
        }
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;

    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
            self.sess().emit_fatal(Spanned { span, node: err })
        } else {
            match fn_abi_request {
                FnAbiRequest::OfFnPtr { sig, extra_args } => {
                    span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}",);
                }
                FnAbiRequest::OfInstance { instance, extra_args } => {
                    span_bug!(
                        span,
                        "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}",
                    );
                }
            }
        }
    }
}