use crate::abi::FnAbiLlvmExt;
use crate::attributes;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use cstr::cstr;
use libc::{c_char, c_uint};
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::layout::{
    FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;
use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions};
use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
use smallvec::SmallVec;
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter;
use std::ops::Deref;
use std::ptr;

// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'_, '_, '_> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();

impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}

impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl<'tcx> ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl<'tcx> ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, '_> {
    #[inline]
    fn target_spec(&self) -> &Target {
        self.cx.target_spec()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type LayoutOfResult = TyAndLayout<'tcx>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        self.cx.handle_layout_err(err, span, ty)
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;

    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
    }
}

impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl<'ll, 'tcx> HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}

macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}

impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self {
        let bx = Builder::with_cx(cx);
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
        }
        bx
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn set_span(&mut self, _span: Span) {}

    fn append_block(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &str) -> &'ll BasicBlock {
        unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        }
    }

    fn append_sibling_block(&mut self, name: &str) -> &'ll BasicBlock {
        Self::append_block(self.cx, self.llfn(), name)
    }

    fn switch_to_block(&mut self, llbb: Self::BasicBlock) {
        *self = Self::build(self.cx, llbb)
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }

    fn invoke(
        &mut self,
        llty: &'ll Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
        if let Some(funclet_bundle) = funclet_bundle {
            bundles.push(funclet_bundle);
        }

        // Emit CFI pointer type membership test
        self.cfi_type_test(fn_attrs, fn_abi, llfn);

        // Emit KCFI operand bundle
        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
        if let Some(kcfi_bundle) = kcfi_bundle {
            bundles.push(kcfi_bundle);
        }

        let invoke = unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundles.as_ptr(),
                bundles.len() as c_uint,
                UNNAMED,
            )
        };
        if let Some(fn_abi) = fn_abi {
            fn_abi.apply_attrs_callsite(self, invoke);
        }
        invoke
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }

    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_middle::ty::{Int, Uint};
        use rustc_middle::ty::{IntTy::*, UintTy::*};

        let new_kind = match ty.kind() {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
            t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(_) => {
                    // Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these
                    // to be the canonical form. It will attempt to reform llvm.usub.with.overflow
                    // in the backend if profitable.
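                    // Unsigned subtraction wraps exactly when lhs < rhs, so the
                    // overflow flag is simply `icmp ult lhs, rhs` on the operands.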
                    let sub = self.sub(lhs, rhs);
                    let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs);
                    return (sub, cmp);
                }

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let res = self.call_intrinsic(name, &[lhs, rhs]);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        } else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }

    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca =
                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ty,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

    #[instrument(level = "trace", skip(self))]
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::zero_sized(place.layout);
        }

        #[instrument(level = "trace", skip(bx))]
        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: abi::Scalar,
            layout: TyAndLayout<'tcx>,
            offset: Size,
        ) {
            if !scalar.is_uninit_valid() {
                bx.noundef_metadata(load);
            }

            match scalar.primitive() {
                abi::Int(..) => {
                    if !scalar.is_always_valid(bx) {
                        bx.range_metadata(load, scalar.valid_range(bx));
                    }
                }
                abi::Pointer(_) => {
                    if !scalar.valid_range(bx).contains(0) {
                        bx.nonnull_metadata(load);
                    }

                    if let Some(pointee) = layout.pointee_info_at(bx, offset) {
                        if let Some(_) = pointee.safe {
                            bx.align_metadata(load, pointee.align);
                        }
                    }
                }
                abi::F32 | abi::F64 => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            let llty = place.layout.llvm_type(self);
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        if let Some(init) = llvm::LLVMGetInitializer(global) {
                            if self.val_ty(init) == llty {
                                const_llval = Some(init);
                            }
                        }
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(llty, place.llval, place.align);
                if let abi::Abi::Scalar(scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                }
                load
            });
            OperandValue::Immediate(self.to_immediate(llval, place.layout))
        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
            let b_offset = a.size(self).align_to(b.align(self).abi);
            let pair_ty = place.layout.llvm_type(self);

            let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                let load = self.load(llty, llptr, align);
                scalar_load_metadata(self, load, scalar, layout, offset);
                self.to_immediate_scalar(load, scalar)
            };

            OperandValue::Pair(
                load(0, a, place.layout, place.align, Size::ZERO),
                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }

    fn write_operand_repeatedly(
        &mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);

        let header_bb = self.append_sibling_block("repeat_loop_header");
        let body_bb = self.append_sibling_block("repeat_loop_body");
        let next_bb = self.append_sibling_block("repeat_loop_next");

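        // Lower the repeat into a small loop: the header block carries the index
        // in a phi, the body stores one element and increments the index, and
        // codegen resumes in `next_bb` once the index reaches `count`.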
        self.br(header_bb);

        let mut header_bx = Self::build(self.cx, header_bb);
        let i = header_bx.phi(self.val_ty(zero), &[zero], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntULT, i, count);
        header_bx.cond_br(keep_going, body_bb, next_bb);

        let mut body_bx = Self::build(self.cx, body_bb);
        let dest_elem = dest.project_index(&mut body_bx, i);
        cg_elem.val.store(&mut body_bx, dest_elem);

        let next = body_bx.unchecked_uadd(i, self.const_usize(1));
        body_bx.br(header_bb);
        header_bx.add_incoming_to_phi(i, next, body_bb);

        *self = Self::build(self.cx, next_bb);
    }

    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
        if self.sess().target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: https://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }

    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }

    fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP2(
                self.llbuilder,
                ty,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        indices: &[&'ll Value],
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP2(
                self.llbuilder,
                ty,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
    }

    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.fptoint_sat(false, val, dest_ty)
    }

    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        self.fptoint_sat(true, val, dest_ty)
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // On WebAssembly the `fptoui` and `fptosi` instructions currently have
        // poor codegen. The reason for this is that the corresponding wasm
        // instructions, `i32.trunc_f32_s` for example, will trap when the float
        // is out-of-bounds, infinity, or nan. This means that LLVM
        // automatically inserts control flow around `fptoui` and `fptosi`
        // because the LLVM instruction `fptoui` is defined as producing a
        // poison value, not having UB on out-of-bounds values.
        //
        // This method, however, is only used with non-saturating casts that
        // have UB on out-of-bounds values. This means that it's ok if we use
        // the raw wasm instruction since out-of-bounds values can do whatever
        // we like. To ensure that LLVM picks the right instruction we choose
        // the raw wasm intrinsic functions which avoid LLVM inserting all the
        // other control flow automatically.
        if self.sess().target.is_like_wasm {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    return self.call_intrinsic(name, &[val]);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // see `fptoui` above for why wasm is different here
        if self.sess().target.is_like_wasm {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    return self.call_intrinsic(name, &[val]);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildIntCast2(
                self.llbuilder,
                val,
                dest_ty,
                if is_signed { True } else { False },
                UNNAMED,
            )
        }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::RealPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }

    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
        let landing_pad = self.landing_pad(ty, pers_fn, 0);
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
    }

    fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
        let landing_pad = self.landing_pad(ty, pers_fn, 1);
        self.add_clause(landing_pad, self.const_array(self.type_i8p(), &[]));
        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
    }

    fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
        let mut exn = self.const_poison(ty);
        exn = self.insert_value(exn, exn0, 0);
        exn = self.insert_value(exn, exn1, 1);
        unsafe {
            llvm::LLVMBuildResume(self.llbuilder, exn);
        }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMBuildCleanupPad(
                self.llbuilder,
                parent,
                args.as_ptr(),
                args.len() as c_uint,
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) {
        unsafe {
            llvm::LLVMBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
                .expect("LLVM does not have support for cleanupret");
        }
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMBuildCatchPad(
                self.llbuilder,
                parent,
                args.as_ptr(),
                args.len() as c_uint,
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        handlers: &[&'ll BasicBlock],
    ) -> &'ll Value {
        let name = cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                handlers.len() as c_uint,
                name.as_ptr(),
            )
        };
        let ret = ret.expect("LLVM does not have support for catchswitch");
        for handler in handlers {
            unsafe {
                llvm::LLVMAddHandler(ret, handler);
            }
        }
        ret
    }

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            let value = llvm::LLVMBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                llvm::False, // SingleThreaded
            );
            llvm::LLVMSetWeak(value, weak);
            value
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                llvm::False, // SingleThreaded
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: SynchronizationScope,
    ) {
        let single_threaded = match scope {
            SynchronizationScope::SingleThread => llvm::True,
            SynchronizationScope::CrossThread => llvm::False,
        };
        unsafe {
            llvm::LLVMBuildFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                single_threaded,
                UNNAMED,
            );
        }
    }

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_invariant_load as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
    }

    fn instrprof_increment(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        num_counters: &'ll Value,
        index: &'ll Value,
    ) {
        debug!(
            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
            fn_name, hash, num_counters, index
        );

        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
        let llty = self.cx.type_func(
            &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
            self.cx.type_void(),
        );
        let args = &[fn_name, hash, num_counters, index];
        let args = self.check_call("call", llty, llfn, args);

        unsafe {
            let _ = llvm::LLVMRustBuildCall(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                [].as_ptr(),
                0 as c_uint,
            );
        }
    }

    fn call(
        &mut self,
        llty: &'ll Type,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llty, llfn, args);
        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
        if let Some(funclet_bundle) = funclet_bundle {
            bundles.push(funclet_bundle);
        }

        // Emit CFI pointer type membership test
        self.cfi_type_test(fn_attrs, fn_abi, llfn);

        // Emit KCFI operand bundle
        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
        if let Some(kcfi_bundle) = kcfi_bundle {
            bundles.push(kcfi_bundle);
        }

        let call = unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llty,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundles.as_ptr(),
                bundles.len() as c_uint,
            )
        };
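        // Apply the ABI's parameter/return attributes to this call site when the
        // `FnAbi` is known; callers such as `call_intrinsic` pass `None` and skip this.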
        if let Some(fn_abi) = fn_abi {
            fn_abi.apply_attrs_callsite(self, call);
        }
        call
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
        attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
    }
}

impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}

impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    fn align_metadata(&mut self, load: &'ll Value, align: Align) {
        unsafe {
            let v = [self.cx.const_u64(align.bytes())];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_align as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn noundef_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_noundef as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }

    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }

    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }

    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }

    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }

    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }

    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }

    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }

    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }

    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }

    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }

    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret = unsafe { llvm::LLVMBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }

    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                 Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    fn check_call<'b>(
        &mut self,
        typ: &str,
        fn_ty: &'ll Type,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = iter::zip(param_tys, args)
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                         Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
        let (ty, f) = self.cx.get_intrinsic(intrinsic);
        self.call(ty, None, None, f, args, None)
    }

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        if !self.cx().sess().emit_lifetime_markers() {
            return;
        }

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
    }

    pub(crate) fn phi(
        &mut self,
        ty: &'ll Type,
        vals: &[&'ll Value],
        bbs: &[&'ll BasicBlock],
    ) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }

    fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        let src_ty = self.cx.val_ty(val);
        let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
            (
                self.cx.element_type(src_ty),
                self.cx.element_type(dest_ty),
                Some(self.cx.vector_length(src_ty)),
            )
        } else {
            (src_ty, dest_ty, None)
        };
        let float_width = self.cx.float_width(float_ty);
        let int_width = self.cx.int_width(int_ty);

        let instr = if signed { "fptosi" } else { "fptoui" };
        let name = if let Some(vector_length) = vector_length {
            format!(
                "llvm.{}.sat.v{}i{}.v{}f{}",
                instr, vector_length, int_width, vector_length, float_width
            )
        } else {
            format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
        };
        let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
        self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None)
    }

    pub(crate) fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary Consts while,
        // LLVMBuildLandingPad requires the argument to be a Function (as of LLVM 12). The
        // personality lives on the parent function anyway.
        self.set_personality_fn(pers_fn);
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
        }
    }

    // Emits CFI pointer type membership tests.
    fn cfi_type_test(
        &mut self,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
    ) {
        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
        if is_indirect_call && fn_abi.is_some() && self.tcx.sess.is_sanitizer_cfi_enabled() {
            if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::CFI) {
                return;
            }

            let mut options = TypeIdOptions::empty();
            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
            }
            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
            }

            let typeid = typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
            let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();

            // Test whether the function pointer is associated with the type identifier.
            let cond = self.type_test(llfn, typeid_metadata);
            let bb_pass = self.append_sibling_block("type_test.pass");
            let bb_fail = self.append_sibling_block("type_test.fail");
            self.cond_br(cond, bb_pass, bb_fail);

            self.switch_to_block(bb_fail);
            self.abort();
            self.unreachable();

            self.switch_to_block(bb_pass);
        }
    }

    // Emits KCFI operand bundles.
    fn kcfi_operand_bundle(
        &mut self,
        fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        llfn: &'ll Value,
    ) -> Option<llvm::OperandBundleDef<'ll>> {
        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
        let kcfi_bundle = if is_indirect_call && self.tcx.sess.is_sanitizer_kcfi_enabled() {
            if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::KCFI) {
                return None;
            }

            let mut options = TypeIdOptions::empty();
            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
            }
            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
            }

            let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
            Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
        } else {
            None
        };
        kcfi_bundle
    }
}
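
// Illustrative sketch (not part of the crate): callers obtain a `Builder`
// positioned at a basic block and emit instructions through the
// `BuilderMethods` trait. The values below (`cx`, `llbb`, `lhs`, `rhs`) are
// hypothetical handles produced by the surrounding MIR lowering.
//
//     let mut bx = Builder::build(cx, llbb);
//     let sum = bx.add(lhs, rhs);
//     bx.ret(sum);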