use rustc_apfloat::Float;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym;
use rustc_target::abi::Abi;

use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};

use crate::fluent_generated as fluent;

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
    /// and a boolean signifying the potential overflow to the destination.
    pub fn binop_with_overflow(
        &mut self,
        op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let (val, overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
        debug_assert_eq!(
            Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
            dest.layout.ty,
            "type mismatch for result of {:?}",
            op,
        );
        // Write the result to `dest`.
        if let Abi::ScalarPair(..) = dest.layout.abi {
            // We can use the optimized path and avoid `place_field` (which might do
            // `force_allocation`).
            let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
            self.write_immediate(pair, dest)?;
        } else {
            assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
            // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
            // do a component-wise write here. This code path is slower than the above because
            // `place_field` will have to `force_allocate` locals here.
            let val_field = self.place_field(&dest, 0)?;
            self.write_scalar(val, &val_field)?;
            let overflowed_field = self.place_field(&dest, 1)?;
            self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
        }
        Ok(())
    }
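
    // Illustration only (not interpreter code): the `(value, overflowed)` pair
    // written above matches what the `overflowing_*` methods on primitive
    // types produce in surface Rust, e.g.:
    //
    //     let (val, overflowed) = i32::MAX.overflowing_add(1);
    //     assert_eq!((val, overflowed), (i32::MIN, true));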
    /// Applies the binary operation `op` to the arguments and writes the result to the
    /// destination.
    pub fn binop_ignore_overflow(
        &mut self,
        op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
        assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
        self.write_scalar(val, dest)
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    fn binary_char_op(
        &self,
        bin_op: mir::BinOp,
        l: char,
        r: char,
    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
        use rustc_middle::mir::BinOp::*;

        let res = match bin_op {
            Eq => l == r,
            Ne => l != r,
            Lt => l < r,
            Le => l <= r,
            Gt => l > r,
            Ge => l >= r,
            _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
        };
        (Scalar::from_bool(res), false, self.tcx.types.bool)
    }

    fn binary_bool_op(
        &self,
        bin_op: mir::BinOp,
        l: bool,
        r: bool,
    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
        use rustc_middle::mir::BinOp::*;

        let res = match bin_op {
            Eq => l == r,
            Ne => l != r,
            Lt => l < r,
            Le => l <= r,
            Gt => l > r,
            Ge => l >= r,
            BitAnd => l & r,
            BitOr => l | r,
            BitXor => l ^ r,
            _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
        };
        (Scalar::from_bool(res), false, self.tcx.types.bool)
    }

    fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
        &self,
        bin_op: mir::BinOp,
        ty: Ty<'tcx>,
        l: F,
        r: F,
    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
        use rustc_middle::mir::BinOp::*;

        let (val, ty) = match bin_op {
            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
            Add => ((l + r).value.into(), ty),
            Sub => ((l - r).value.into(), ty),
            Mul => ((l * r).value.into(), ty),
            Div => ((l / r).value.into(), ty),
            Rem => ((l % r).value.into(), ty),
            _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
        };
        (val, false, ty)
    }
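
    // Illustration only (not interpreter code): the comparison arms above
    // inherit IEEE semantics from `rustc_apfloat`, so NaN compares unequal to
    // everything, including itself, just like surface Rust:
    //
    //     let nan = f64::NAN;
    //     assert!(!(nan == nan)); // `Eq` is false
    //     assert!(nan != nan);    // `Ne` is true
    //     assert!(!(nan < nan) && !(nan >= nan)); // all orderings are false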
    fn binary_int_op(
        &self,
        bin_op: mir::BinOp,
        // passing in raw bits
        l: u128,
        left_layout: TyAndLayout<'tcx>,
        r: u128,
        right_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::BinOp::*;

        let throw_ub_on_overflow = match bin_op {
            AddUnchecked => Some(sym::unchecked_add),
            SubUnchecked => Some(sym::unchecked_sub),
            MulUnchecked => Some(sym::unchecked_mul),
            ShlUnchecked => Some(sym::unchecked_shl),
            ShrUnchecked => Some(sym::unchecked_shr),
            _ => None,
        };

        // Shift ops can have an RHS with a different numeric type.
        if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
            let size = u128::from(left_layout.size.bits());
            // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
            // zero-extended form). This matches the codegen backend:
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
            // The overflow check likewise ignores the sign:
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
            // This would behave rather strangely if we had integer types of size 256: a shift by
            // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
            // shift by -1i16 though would be considered overflowing. If we had integers of size
            // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
            // the first shifts by 255, the latter by u16::MAX % 512 = 511. Luckily, our integers
            // are at most 128 bits wide, so negative shifts *always* overflow and we get
            // consistent results for the same value represented at different bit widths.
            assert!(size <= 128);
            let original_r = r;
            let overflow = r >= size;
            // The shift offset is implicitly masked to the type size, to make sure this operation
            // is always defined. This is the one MIR operator that does *not* directly map to a
            // single LLVM operation. See
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
            // for the corresponding truncation in our codegen backends.
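            //
            // Illustration only (not interpreter code): `wrapping_shl` applies
            // the same masking rule in surface Rust, so for `i32` a shift
            // amount of 33 is masked to `33 % 32 == 1`:
            //
            //     assert_eq!(1i32.wrapping_shl(33), 1i32 << 1); // == 2
            //
            // Here we additionally report `overflow == true` for such a shift,
            // which the `*Unchecked` variants escalate to UB below.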
            let r = r % size;
            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
            let result = if left_layout.abi.is_signed() {
                let l = self.sign_extend(l, left_layout) as i128;
                let result = match bin_op {
                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
                    _ => bug!(),
                };
                result as u128
            } else {
                match bin_op {
                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
                    _ => bug!(),
                }
            };
            let truncated = self.truncate(result, left_layout);

            if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                throw_ub_custom!(
                    fluent::const_eval_overflow_shift,
                    val = original_r,
                    name = intrinsic_name
                );
            }

            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
        }

        // For the remaining ops, the types must be the same on both sides.
        if left_layout.ty != right_layout.ty {
            span_bug!(
                self.cur_span(),
                "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
                bin_op,
                l,
                left_layout.ty,
                r,
                right_layout.ty,
            )
        }

        let size = left_layout.size;

        // Operations that need special treatment for signed integers.
        if left_layout.abi.is_signed() {
            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                Lt => Some(i128::lt),
                Le => Some(i128::le),
                Gt => Some(i128::gt),
                Ge => Some(i128::ge),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;
                return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
            }
            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
                Div if r == 0 => throw_ub!(DivisionByZero),
                Rem if r == 0 => throw_ub!(RemainderByZero),
                Div => Some(i128::overflowing_div),
                Rem => Some(i128::overflowing_rem),
                Add | AddUnchecked => Some(i128::overflowing_add),
                Sub | SubUnchecked => Some(i128::overflowing_sub),
                Mul | MulUnchecked => Some(i128::overflowing_mul),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;

                // We need a special check for overflowing Rem and Div since they are *UB*
                // on overflow, which can happen with "int_min $OP -1".
                if matches!(bin_op, Rem | Div) {
                    if l == size.signed_int_min() && r == -1 {
                        if bin_op == Rem {
                            throw_ub!(RemainderOverflow)
                        } else {
                            throw_ub!(DivisionOverflow)
                        }
                    }
                }

                let (result, oflo) = op(l, r);
                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
                // If that truncation loses any information, we have an overflow.
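                //
                // Illustration only (not interpreter code): on `i8`, surface
                // Rust exhibits both cases handled here: `Div` by -1 at the
                // minimum value is an overflow error (UB above), while `Mul`
                // merely reports overflow via the truncation check below:
                //
                //     assert_eq!(i8::MIN.checked_div(-1), None);
                //     assert_eq!(i8::MIN.overflowing_mul(-1), (i8::MIN, true));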
                let result = result as u128;
                let truncated = self.truncate(result, left_layout);
                let overflow = oflo || self.sign_extend(truncated, left_layout) != result;
                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
                }
                return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
            }
        }

        let (val, ty) = match bin_op {
            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),

            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),

            BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
            BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
            BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),

            Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
                assert!(!left_layout.abi.is_signed());
                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                    Add | AddUnchecked => u128::overflowing_add,
                    Sub | SubUnchecked => u128::overflowing_sub,
                    Mul | MulUnchecked => u128::overflowing_mul,
                    Div if r == 0 => throw_ub!(DivisionByZero),
                    Rem if r == 0 => throw_ub!(RemainderByZero),
                    Div => u128::overflowing_div,
                    Rem => u128::overflowing_rem,
                    _ => bug!(),
                };
                let (result, oflo) = op(l, r);
                // Truncate to target type.
                // If that truncation loses any information, we have an overflow.
                let truncated = self.truncate(result, left_layout);
                let overflow = oflo || truncated != result;
                if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                    throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
                }
                return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
            }

            _ => span_bug!(
                self.cur_span(),
                "invalid binary op {:?}: {:?}, {:?} (both {:?})",
                bin_op,
                l,
                r,
                right_layout.ty,
            ),
        };

        Ok((val, false, ty))
    }

    fn binary_ptr_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::BinOp::*;

        match bin_op {
            // Pointer ops that are always supported.
            Offset => {
                let ptr = left.to_scalar().to_pointer(self)?;
                let offset_count = right.to_scalar().to_target_isize(self)?;
                let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                Ok((Scalar::from_maybe_pointer(offset_ptr, self), false, left.layout.ty))
            }

            // Fall back to machine hook so Miri can support more pointer ops.
            _ => M::binary_ptr_op(self, bin_op, left, right),
        }
    }
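
    // Illustration only (not interpreter code): the `Offset` arm above
    // implements the semantics of raw-pointer `offset`, where the RHS counts
    // elements of the pointee type, not bytes:
    //
    //     let xs = [10u32, 20, 30];
    //     let p = xs.as_ptr();
    //     assert_eq!(unsafe { *p.offset(2) }, 30);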
    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    pub fn overflowing_binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
        trace!(
            "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
            bin_op,
            *left,
            left.layout.ty,
            *right,
            right.layout.ty
        );

        match left.layout.ty.kind() {
            ty::Char => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar();
                let right = right.to_scalar();
                Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
            }
            ty::Bool => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar();
                let right = right.to_scalar();
                Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
            }
            ty::Float(fty) => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let ty = left.layout.ty;
                let left = left.to_scalar();
                let right = right.to_scalar();
                Ok(match fty {
                    FloatTy::F32 => {
                        self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
                    }
                    FloatTy::F64 => {
                        self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
                    }
                })
            }
            _ if left.layout.ty.is_integral() => {
                // The RHS type can be different, e.g. for shifts -- but it has to be integral, too.
                assert!(
                    right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                let l = left.to_scalar().to_bits(left.layout.size)?;
                let r = right.to_scalar().to_bits(right.layout.size)?;
                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
            }
            _ if left.layout.ty.is_any_ptr() => {
                // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
                // (Even when both sides are pointers, their type might differ, see issue #91636)
                assert!(
                    right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                self.binary_ptr_op(bin_op, left, right)
            }
            _ => span_bug!(
                self.cur_span(),
                "Invalid MIR: bad LHS type for binop: {:?}",
                left.layout.ty
            ),
        }
    }

    /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
    #[inline]
    pub fn binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
    }

    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    pub fn overflowing_unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::UnOp::*;

        let layout = val.layout;
        let val = val.to_scalar();
        trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);

        match layout.ty.kind() {
            ty::Bool => {
                let val = val.to_bool()?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                };
                Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
            }
            ty::Float(fty) => {
                let res = match (un_op, fty) {
                    (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
                    (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
                    _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
                };
                Ok((res, false, layout.ty))
            }
            _ => {
                assert!(layout.ty.is_integral());
                let val = val.to_bits(layout.size)?;
                let (res, overflow) = match un_op {
                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
                    Neg => {
                        // arithmetic negation
                        assert!(layout.abi.is_signed());
                        let val = self.sign_extend(val, layout) as i128;
                        let (res, overflow) = val.overflowing_neg();
                        let res = res as u128;
                        // Truncate to target type.
                        // If that truncation loses any information, we have an overflow.
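                        //
                        // Illustration only (not interpreter code): negating
                        // the minimum value is the one overflowing case, and
                        // surface Rust reports it the same way:
                        //
                        //     assert_eq!(i8::MIN.overflowing_neg(), (i8::MIN, true));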
                        let truncated = self.truncate(res, layout);
                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
                    }
                };
                Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
            }
        }
    }

    pub fn unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
    }
}