//! Compiler intrinsics.
//!
//! The corresponding definitions are in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_codegen_llvm/src/intrinsic.rs>.
//! The corresponding const implementations are in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_const_eval/src/interpret/intrinsics.rs>.
//!
//! # Const intrinsics
//!
//! Note: any changes to the constness of intrinsics should be discussed with the language team.
//! This includes changes in the stability of the constness.
//!
//! In order to make an intrinsic usable at compile-time, one needs to copy the implementation
//! from <https://github.com/rust-lang/miri/blob/master/src/shims/intrinsics.rs> to
//! <https://github.com/rust-lang/rust/blob/master/compiler/rustc_const_eval/src/interpret/intrinsics.rs> and add a
//! `#[rustc_const_unstable(feature = "const_such_and_such", issue = "01234")]` to the intrinsic declaration.
//!
//! If an intrinsic is supposed to be used from a `const fn` with a `rustc_const_stable` attribute,
//! the intrinsic's attribute must be `rustc_const_stable`, too. Such a change should not be done
//! without T-lang consultation, because it bakes a feature into the language that cannot be
//! replicated in user code without compiler support.
//!
//! # Volatiles
//!
//! The volatile intrinsics provide operations intended to act on I/O
//! memory, which are guaranteed to not be reordered by the compiler
//! across other volatile intrinsics. See the LLVM documentation on
//! [[volatile]].
//!
//! [volatile]: https://llvm.org/docs/LangRef.html#volatile-memory-accesses
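//!
//! The stabilized counterparts of these intrinsics are `core::ptr::read_volatile` and
//! `core::ptr::write_volatile`. A minimal sketch (the plain local variable here stands in
//! for what would usually be a memory-mapped I/O location):
//!
//! ```
//! use core::ptr;
//!
//! let mut data = 0u32;
//! // Volatile accesses are never elided or reordered with other volatile accesses.
//! unsafe {
//!     ptr::write_volatile(&mut data, 1);
//!     assert_eq!(ptr::read_volatile(&data), 1);
//! }
//! ```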
//!
//! # Atomics
//!
//! The atomic intrinsics provide common atomic operations on machine
//! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [[atomics]].
//!
//! [atomics]: https://llvm.org/docs/Atomics.html
//!
//! A quick refresher on memory ordering:
//!
//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
//!   take place after the barrier.
//! * Release - a barrier for releasing a lock. Preceding reads and writes
//!   take place before the barrier.
//! * Sequentially consistent - sequentially consistent operations are
//!   guaranteed to happen in order. This is the standard mode for working
//!   with atomic types and is equivalent to Java's `volatile`.
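//!
//! As a minimal sketch of the stabilized interface these intrinsics back (the counter
//! below is purely illustrative):
//!
//! ```
//! use core::sync::atomic::{AtomicUsize, Ordering};
//!
//! let counter = AtomicUsize::new(0);
//! // Publish a value with Release, then observe it with Acquire.
//! counter.store(1, Ordering::Release);
//! assert_eq!(counter.load(Ordering::Acquire), 1);
//! ```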

#![unstable(
    feature = "core_intrinsics",
    reason = "intrinsics are unlikely to ever be stabilized, instead \
              they should be used through stabilized interfaces \
              in the rest of the standard library",
    issue = "none"
)]
#![allow(missing_docs)]

use crate::marker::DiscriminantKind;
use crate::marker::Tuple;
use crate::mem;

pub mod mir;

// These imports are used for simplifying intra-doc links
#[allow(unused_imports)]
#[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
use crate::sync::atomic::{self, AtomicBool, AtomicI32, AtomicIsize, AtomicU32, Ordering};

#[stable(feature = "drop_in_place", since = "1.8.0")]
#[rustc_allowed_through_unstable_modules]
#[deprecated(note = "no longer an intrinsic - use `ptr::drop_in_place` directly", since = "1.52.0")]
#[inline]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // SAFETY: see `ptr::drop_in_place`
    unsafe { crate::ptr::drop_in_place(to_drop) }
}

extern "rust-intrinsic" {
    // N.B., these intrinsics take raw pointers because they mutate aliased
    // memory, which is not valid for either `&` or `&mut`.

    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Relaxed`] as both the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_relaxed_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Relaxed`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_relaxed_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Relaxed`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_relaxed_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Acquire`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_acquire_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Acquire`] as both the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_acquire_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Acquire`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_acquire_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Release`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_release_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Release`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_release_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::Release`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_release_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::AcqRel`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_acqrel_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::AcqRel`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_acqrel_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::AcqRel`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_acqrel_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::SeqCst`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_seqcst_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::SeqCst`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
    #[rustc_nounwind]
    pub fn atomic_cxchg_seqcst_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange` method by passing
    /// [`Ordering::SeqCst`] as both the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange`].
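    ///
    /// A minimal sketch of that stabilized call (the flag is purely illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let flag = AtomicBool::new(false);
    /// // Succeeds because the current value is `false`; the previous value is returned.
    /// assert_eq!(
    ///     flag.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst),
    ///     Ok(false)
    /// );
    /// ```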
    #[rustc_nounwind]
    pub fn atomic_cxchg_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);

    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Relaxed`] as both the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_relaxed_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Relaxed`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_relaxed_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Relaxed`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_relaxed_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Acquire`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_acquire_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Acquire`] as both the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_acquire_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Acquire`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_acquire_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Release`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_release_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Release`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_release_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::Release`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_release_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::AcqRel`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_acqrel_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::AcqRel`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_acqrel_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::AcqRel`] and [`Ordering::SeqCst`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_acqrel_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::SeqCst`] and [`Ordering::Relaxed`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_seqcst_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::SeqCst`] and [`Ordering::Acquire`] as the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_seqcst_acquire<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
    /// Stores a value if the current value is the same as the `old` value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `compare_exchange_weak` method by passing
    /// [`Ordering::SeqCst`] as both the success and failure parameters.
    /// For example, [`AtomicBool::compare_exchange_weak`].
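    ///
    /// A minimal sketch of that stabilized call in its usual retry loop
    /// (the counter is purely illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let counter = AtomicUsize::new(0);
    /// let mut current = counter.load(Ordering::Relaxed);
    /// // The weak variant may fail spuriously, so it is normally retried in a loop.
    /// while let Err(actual) = counter.compare_exchange_weak(
    ///     current,
    ///     current + 1,
    ///     Ordering::SeqCst,
    ///     Ordering::SeqCst,
    /// ) {
    ///     current = actual;
    /// }
    /// assert_eq!(counter.load(Ordering::Relaxed), 1);
    /// ```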
    #[rustc_nounwind]
    pub fn atomic_cxchgweak_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);

    /// Loads the current value of the pointer.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `load` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::load`].
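    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let ready = AtomicBool::new(true);
    /// assert!(ready.load(Ordering::SeqCst));
    /// ```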
    #[rustc_nounwind]
    pub fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
    /// Loads the current value of the pointer.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `load` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::load`].
    #[rustc_nounwind]
    pub fn atomic_load_acquire<T: Copy>(src: *const T) -> T;
    /// Loads the current value of the pointer.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `load` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
    #[rustc_nounwind]
    pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
    #[rustc_nounwind]
    pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;

    /// Stores the value at the specified memory location.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `store` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::store`].
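    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let ready = AtomicBool::new(false);
    /// ready.store(true, Ordering::SeqCst);
    /// assert!(ready.load(Ordering::SeqCst));
    /// ```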
    #[rustc_nounwind]
    pub fn atomic_store_seqcst<T: Copy>(dst: *mut T, val: T);
    /// Stores the value at the specified memory location.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `store` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::store`].
    #[rustc_nounwind]
    pub fn atomic_store_release<T: Copy>(dst: *mut T, val: T);
    /// Stores the value at the specified memory location.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `store` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::store`].
    #[rustc_nounwind]
    pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
    #[rustc_nounwind]
    pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);

    /// Stores the value at the specified memory location, returning the old value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `swap` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::swap`].
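    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let flag = AtomicBool::new(false);
    /// // Stores the new value and returns the previous one.
    /// assert_eq!(flag.swap(true, Ordering::SeqCst), false);
    /// ```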
    #[rustc_nounwind]
    pub fn atomic_xchg_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Stores the value at the specified memory location, returning the old value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `swap` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::swap`].
    #[rustc_nounwind]
    pub fn atomic_xchg_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Stores the value at the specified memory location, returning the old value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `swap` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::swap`].
    #[rustc_nounwind]
    pub fn atomic_xchg_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Stores the value at the specified memory location, returning the old value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `swap` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::swap`].
    #[rustc_nounwind]
    pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Stores the value at the specified memory location, returning the old value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `swap` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::swap`].
    #[rustc_nounwind]
    pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Adds to the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_add` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_add`].
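    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let counter = AtomicIsize::new(5);
    /// // Returns the previous value; the stored value becomes 7.
    /// assert_eq!(counter.fetch_add(2, Ordering::SeqCst), 5);
    /// ```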
    #[rustc_nounwind]
    pub fn atomic_xadd_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Adds to the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_add` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_add`].
    #[rustc_nounwind]
    pub fn atomic_xadd_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Adds to the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_add` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_add`].
    #[rustc_nounwind]
    pub fn atomic_xadd_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Adds to the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_add` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_add`].
    #[rustc_nounwind]
    pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Adds to the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_add` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_add`].
    #[rustc_nounwind]
    pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Subtracts from the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_sub` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
    #[rustc_nounwind]
    pub fn atomic_xsub_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Subtracts from the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_sub` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
    #[rustc_nounwind]
    pub fn atomic_xsub_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Subtracts from the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_sub` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
    #[rustc_nounwind]
    pub fn atomic_xsub_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Subtracts from the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_sub` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
    #[rustc_nounwind]
    pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Subtracts from the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_sub` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
    #[rustc_nounwind]
    pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Bitwise and with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_and` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_and`].
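    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let flag = AtomicBool::new(true);
    /// // `true & false == false`; the previous value is returned.
    /// assert_eq!(flag.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(flag.load(Ordering::SeqCst), false);
    /// ```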
    #[rustc_nounwind]
    pub fn atomic_and_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise and with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_and` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_and`].
    #[rustc_nounwind]
    pub fn atomic_and_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise and with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_and` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_and`].
    #[rustc_nounwind]
    pub fn atomic_and_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise and with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_and` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_and`].
    #[rustc_nounwind]
    pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise and with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_and` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_and`].
    #[rustc_nounwind]
    pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Bitwise nand with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`AtomicBool`] type via the `fetch_nand` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_nand`].
    #[rustc_nounwind]
    pub fn atomic_nand_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise nand with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`AtomicBool`] type via the `fetch_nand` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_nand`].
    #[rustc_nounwind]
    pub fn atomic_nand_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise nand with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`AtomicBool`] type via the `fetch_nand` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_nand`].
    #[rustc_nounwind]
    pub fn atomic_nand_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise nand with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`AtomicBool`] type via the `fetch_nand` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_nand`].
    #[rustc_nounwind]
    pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise nand with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`AtomicBool`] type via the `fetch_nand` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_nand`].
    #[rustc_nounwind]
    pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Bitwise or with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_or` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_or`].
    #[rustc_nounwind]
    pub fn atomic_or_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise or with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_or` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_or`].
    #[rustc_nounwind]
    pub fn atomic_or_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise or with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_or` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_or`].
    #[rustc_nounwind]
    pub fn atomic_or_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise or with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_or` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_or`].
    #[rustc_nounwind]
    pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise or with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_or` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_or`].
    #[rustc_nounwind]
    pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Bitwise xor with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_xor` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_xor`].
    #[rustc_nounwind]
    pub fn atomic_xor_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise xor with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_xor` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_xor`].
    #[rustc_nounwind]
    pub fn atomic_xor_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise xor with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_xor` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_xor`].
    #[rustc_nounwind]
    pub fn atomic_xor_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise xor with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_xor` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_xor`].
    #[rustc_nounwind]
    pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Bitwise xor with the current value, returning the previous value.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] types via the `fetch_xor` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_xor`].
    #[rustc_nounwind]
    pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Maximum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_max` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_max`].
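    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{AtomicI32, Ordering};
    ///
    /// let value = AtomicI32::new(-3);
    /// // Signed maximum: the stored value becomes 7, the old value is returned.
    /// assert_eq!(value.fetch_max(7, Ordering::SeqCst), -3);
    /// assert_eq!(value.load(Ordering::SeqCst), 7);
    /// ```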
    #[rustc_nounwind]
    pub fn atomic_max_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_max` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_max_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_max` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_max_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_max` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_max` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Minimum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_min` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_min_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_min` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_min_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_min` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_min_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_min` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_min_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using a signed comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] signed integer types via the `fetch_min` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Minimum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_umin_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_umin_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_umin_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Minimum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_min`].
    #[rustc_nounwind]
    pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// Maximum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
    /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_umax_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
    /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_umax_acquire<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
    /// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_umax_release<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
    /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
    /// Maximum with the current value using an unsigned comparison.
    ///
    /// The stabilized version of this intrinsic is available on the
    /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
    /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_max`].
    #[rustc_nounwind]
    pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T;

    /// An atomic fence.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::fence`] by passing [`Ordering::SeqCst`]
    /// as the `order`.
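    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{fence, Ordering};
    ///
    /// // Establishes a sequentially consistent fence.
    /// fence(Ordering::SeqCst);
    /// ```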
    #[rustc_nounwind]
    pub fn atomic_fence_seqcst();
    /// An atomic fence.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::fence`] by passing [`Ordering::Acquire`]
    /// as the `order`.
    #[rustc_nounwind]
    pub fn atomic_fence_acquire();
    /// An atomic fence.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::fence`] by passing [`Ordering::Release`]
    /// as the `order`.
    #[rustc_nounwind]
    pub fn atomic_fence_release();
    /// An atomic fence.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::fence`] by passing [`Ordering::AcqRel`]
    /// as the `order`.
    #[rustc_nounwind]
    pub fn atomic_fence_acqrel();

    /// A compiler-only memory barrier.
    ///
    /// Memory accesses will never be reordered across this barrier by the
    /// compiler, but no instructions will be emitted for it. This is
    /// appropriate for operations on the same thread that may be preempted,
    /// such as when interacting with signal handlers.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::compiler_fence`] by passing [`Ordering::SeqCst`]
    /// as the `order`.
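    ///
    /// A minimal sketch of that stabilized call (illustrative):
    ///
    /// ```
    /// use std::sync::atomic::{compiler_fence, Ordering};
    ///
    /// // Prevents the compiler from reordering memory accesses across this point;
    /// // no machine instruction is emitted.
    /// compiler_fence(Ordering::SeqCst);
    /// ```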
    #[rustc_nounwind]
    pub fn atomic_singlethreadfence_seqcst();
    /// A compiler-only memory barrier.
    ///
    /// Memory accesses will never be reordered across this barrier by the
    /// compiler, but no instructions will be emitted for it. This is
    /// appropriate for operations on the same thread that may be preempted,
    /// such as when interacting with signal handlers.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::compiler_fence`] by passing [`Ordering::Acquire`]
    /// as the `order`.
    #[rustc_nounwind]
    pub fn atomic_singlethreadfence_acquire();
    /// A compiler-only memory barrier.
    ///
    /// Memory accesses will never be reordered across this barrier by the
    /// compiler, but no instructions will be emitted for it. This is
    /// appropriate for operations on the same thread that may be preempted,
    /// such as when interacting with signal handlers.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::compiler_fence`] by passing [`Ordering::Release`]
    /// as the `order`.
    #[rustc_nounwind]
    pub fn atomic_singlethreadfence_release();
    /// A compiler-only memory barrier.
    ///
    /// Memory accesses will never be reordered across this barrier by the
    /// compiler, but no instructions will be emitted for it. This is
    /// appropriate for operations on the same thread that may be preempted,
    /// such as when interacting with signal handlers.
    ///
    /// The stabilized version of this intrinsic is available in
    /// [`atomic::compiler_fence`] by passing [`Ordering::AcqRel`]
    /// as the `order`.
    #[rustc_nounwind]
    pub fn atomic_singlethreadfence_acqrel();

    /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
    /// if supported; otherwise, it is a no-op.
    /// Prefetches have no effect on the behavior of the program but can change its performance
    /// characteristics.
    ///
    /// The `locality` argument must be a constant integer and is a temporal locality specifier
    /// ranging from (0), meaning no locality, to (3), meaning extremely local and kept in cache.
    ///
    /// This intrinsic does not have a stable counterpart.
    #[rustc_nounwind]
    pub fn prefetch_read_data<T>(data: *const T, locality: i32);
    /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
    /// if supported; otherwise, it is a no-op.
    /// Prefetches have no effect on the behavior of the program but can change its performance
    /// characteristics.
    ///
    /// The `locality` argument must be a constant integer and is a temporal locality specifier
    /// ranging from (0), meaning no locality, to (3), meaning extremely local and kept in cache.
    ///
    /// This intrinsic does not have a stable counterpart.
    #[rustc_nounwind]
    pub fn prefetch_write_data<T>(data: *const T, locality: i32);
    /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
    /// if supported; otherwise, it is a no-op.
    /// Prefetches have no effect on the behavior of the program but can change its performance
    /// characteristics.
    ///
    /// The `locality` argument must be a constant integer and is a temporal locality specifier
    /// ranging from (0), meaning no locality, to (3), meaning extremely local and kept in cache.
    ///
    /// This intrinsic does not have a stable counterpart.
    #[rustc_nounwind]
    pub fn prefetch_read_instruction<T>(data: *const T, locality: i32);
    /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
    /// if supported; otherwise, it is a no-op.
    /// Prefetches have no effect on the behavior of the program but can change its performance
    /// characteristics.
    ///
    /// The `locality` argument must be a constant integer and is a temporal locality specifier
    /// ranging from (0), meaning no locality, to (3), meaning extremely local and kept in cache.
    ///
    /// This intrinsic does not have a stable counterpart.
    #[rustc_nounwind]
    pub fn prefetch_write_instruction<T>(data: *const T, locality: i32);

    /// Magic intrinsic that derives its meaning from attributes
    /// attached to the function.
    ///
    /// For example, dataflow uses this to inject static assertions so
    /// that `rustc_peek(potentially_uninitialized)` would actually
    /// double-check that dataflow did indeed compute that it is
    /// uninitialized at that point in the control flow.
    ///
    /// This intrinsic should not be used outside of the compiler.
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn rustc_peek<T>(_: T) -> T;

    /// Aborts the execution of the process.
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    ///
    /// [`std::process::abort`](../../std/process/fn.abort.html) is to be preferred if possible,
    /// as its behavior is more user-friendly and more stable.
    ///
    /// On most platforms, the current implementation of `intrinsics::abort` is to invoke
    /// an invalid instruction. On Unix, the process will probably terminate with a signal
    /// like `SIGABRT`, `SIGILL`, `SIGTRAP`, `SIGSEGV` or `SIGBUS`.
    /// The precise behaviour is not guaranteed and not stable.
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn abort() -> !;

    /// Informs the optimizer that this point in the code is not reachable,
    /// enabling further optimizations.
    ///
    /// N.B., this is very different from the `unreachable!()` macro: unlike the
    /// macro, which panics when it is executed, it is *undefined behavior* to
    /// reach code marked with this function.
    ///
    /// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`].
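    ///
    /// A minimal sketch using that stabilized wrapper (illustrative; the helper
    /// function and its caller contract are hypothetical):
    ///
    /// ```
    /// use std::hint::unreachable_unchecked;
    ///
    /// fn div(a: u32, b: u32) -> u32 {
    ///     if b == 0 {
    ///         // SAFETY: for this sketch, callers are assumed to never pass zero.
    ///         unsafe { unreachable_unchecked() }
    ///     }
    ///     a / b
    /// }
    /// assert_eq!(div(10, 2), 5);
    /// ```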
929 #[rustc_const_stable(feature = "const_unreachable_unchecked", since = "1.57.0")]
930 #[rustc_nounwind]
unreachable() -> !931 pub fn unreachable() -> !;
932
933 /// Informs the optimizer that a condition is always true.
934 /// If the condition is false, the behavior is undefined.
935 ///
936 /// No code is generated for this intrinsic, but the optimizer will try
937 /// to preserve it (and its condition) between passes, which may interfere
938 /// with optimization of surrounding code and reduce performance. It should
939 /// not be used if the invariant can be discovered by the optimizer on its
940 /// own, or if it does not enable any significant optimizations.
941 ///
942 /// This intrinsic does not have a stable counterpart.
943 #[rustc_const_unstable(feature = "const_assume", issue = "76972")]
944 #[rustc_nounwind]
assume(b: bool)945 pub fn assume(b: bool);
946
947 /// Hints to the compiler that branch condition is likely to be true.
948 /// Returns the value passed to it.
949 ///
950 /// Any use other than with `if` statements will probably not have an effect.
951 ///
952 /// Note that, unlike most intrinsics, this is safe to call;
953 /// it does not require an `unsafe` block.
954 /// Therefore, implementations must not require the user to uphold
955 /// any safety invariants.
956 ///
957 /// This intrinsic does not have a stable counterpart.
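///
/// A minimal sketch of how this hint might be used on a nightly toolchain
/// (the `to_digit` helper is just for illustration):
///
/// ```
/// #![feature(core_intrinsics)]
///
/// use std::intrinsics::likely;
///
/// fn to_digit(c: char) -> Option<u32> {
///     // The hint only influences code layout; the result is unchanged.
///     if likely(c.is_ascii_digit()) {
///         Some(c as u32 - '0' as u32)
///     } else {
///         None
///     }
/// }
///
/// assert_eq!(to_digit('7'), Some(7));
/// assert_eq!(to_digit('x'), None);
/// ```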
958 #[rustc_const_unstable(feature = "const_likely", issue = "none")]
959 #[rustc_safe_intrinsic]
960 #[rustc_nounwind]
961 pub fn likely(b: bool) -> bool;
962
963 /// Hints to the compiler that the branch condition is likely to be false.
964 /// Returns the value passed to it.
965 ///
966 /// Any use other than with `if` statements will probably not have an effect.
967 ///
968 /// Note that, unlike most intrinsics, this is safe to call;
969 /// it does not require an `unsafe` block.
970 /// Therefore, implementations must not require the user to uphold
971 /// any safety invariants.
972 ///
973 /// This intrinsic does not have a stable counterpart.
974 #[rustc_const_unstable(feature = "const_likely", issue = "none")]
975 #[rustc_safe_intrinsic]
976 #[rustc_nounwind]
977 pub fn unlikely(b: bool) -> bool;
978
979 /// Executes a breakpoint trap, for inspection by a debugger.
980 ///
981 /// This intrinsic does not have a stable counterpart.
982 #[rustc_nounwind]
983 pub fn breakpoint();
984
985 /// The size of a type in bytes.
986 ///
987 /// Note that, unlike most intrinsics, this is safe to call;
988 /// it does not require an `unsafe` block.
989 /// Therefore, implementations must not require the user to uphold
990 /// any safety invariants.
991 ///
992 /// More specifically, this is the offset in bytes between successive
993 /// items of the same type, including alignment padding.
994 ///
995 /// The stabilized version of this intrinsic is [`core::mem::size_of`].
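///
/// For instance, using the stabilized counterpart:
///
/// ```
/// // The size includes any padding needed between successive elements.
/// assert_eq!(std::mem::size_of::<u32>(), 4);
/// assert_eq!(std::mem::size_of::<[u16; 3]>(), 6);
/// ```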
996 #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
997 #[rustc_safe_intrinsic]
998 #[rustc_nounwind]
999 pub fn size_of<T>() -> usize;
1000
1001 /// The minimum alignment of a type.
1002 ///
1003 /// Note that, unlike most intrinsics, this is safe to call;
1004 /// it does not require an `unsafe` block.
1005 /// Therefore, implementations must not require the user to uphold
1006 /// any safety invariants.
1007 ///
1008 /// The stabilized version of this intrinsic is [`core::mem::align_of`].
1009 #[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
1010 #[rustc_safe_intrinsic]
1011 #[rustc_nounwind]
1012 pub fn min_align_of<T>() -> usize;
1013 /// The preferred alignment of a type.
1014 ///
1015 /// This intrinsic does not have a stable counterpart.
1016 /// Its "tracking issue" is [#91971](https://github.com/rust-lang/rust/issues/91971).
1017 #[rustc_const_unstable(feature = "const_pref_align_of", issue = "91971")]
1018 #[rustc_nounwind]
1019 pub fn pref_align_of<T>() -> usize;
1020
1021 /// The size of the referenced value in bytes.
1022 ///
1023 /// The stabilized version of this intrinsic is [`mem::size_of_val`].
1024 #[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")]
1025 #[rustc_nounwind]
1026 pub fn size_of_val<T: ?Sized>(_: *const T) -> usize;
1027 /// The required alignment of the referenced value.
1028 ///
1029 /// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
1030 #[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
1031 #[rustc_nounwind]
1032 pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize;
1033
1034 /// Gets a static string slice containing the name of a type.
1035 ///
1036 /// Note that, unlike most intrinsics, this is safe to call;
1037 /// it does not require an `unsafe` block.
1038 /// Therefore, implementations must not require the user to uphold
1039 /// any safety invariants.
1040 ///
1041 /// The stabilized version of this intrinsic is [`core::any::type_name`].
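///
/// A small illustration via the stabilized counterpart (the exact string is
/// not guaranteed, so only a loose check is made here):
///
/// ```
/// let name = std::any::type_name::<Option<String>>();
/// assert!(name.contains("Option"));
/// ```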
1042 #[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
1043 #[rustc_safe_intrinsic]
1044 #[rustc_nounwind]
1045 pub fn type_name<T: ?Sized>() -> &'static str;
1046
1047 /// Gets an identifier which is globally unique to the specified type. This
1048 /// function will return the same value for a type regardless of which
1049 /// crate it is invoked in.
1050 ///
1051 /// Note that, unlike most intrinsics, this is safe to call;
1052 /// it does not require an `unsafe` block.
1053 /// Therefore, implementations must not require the user to uphold
1054 /// any safety invariants.
1055 ///
1056 /// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
1057 #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
1058 #[rustc_safe_intrinsic]
1059 #[rustc_nounwind]
1060 #[cfg(bootstrap)]
1061 pub fn type_id<T: ?Sized + 'static>() -> u64;
1062
1063 /// Gets an identifier which is globally unique to the specified type. This
1064 /// function will return the same value for a type regardless of which
1065 /// crate it is invoked in.
1066 ///
1067 /// Note that, unlike most intrinsics, this is safe to call;
1068 /// it does not require an `unsafe` block.
1069 /// Therefore, implementations must not require the user to uphold
1070 /// any safety invariants.
1071 ///
1072 /// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
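///
/// A small illustration via the stabilized counterpart:
///
/// ```
/// use std::any::TypeId;
///
/// // The identifier depends only on the type, not on where it is queried.
/// assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
/// assert_ne!(TypeId::of::<u32>(), TypeId::of::<i32>());
/// ```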
1073 #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
1074 #[rustc_safe_intrinsic]
1075 #[rustc_nounwind]
1076 #[cfg(not(bootstrap))]
1077 pub fn type_id<T: ?Sized + 'static>() -> u128;
1078
1079 /// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
1080 /// This will statically either panic, or do nothing.
1081 ///
1082 /// This intrinsic does not have a stable counterpart.
1083 #[rustc_const_stable(feature = "const_assert_type", since = "1.59.0")]
1084 #[rustc_safe_intrinsic]
1085 #[rustc_nounwind]
1086 pub fn assert_inhabited<T>();
1087
1088 /// A guard for unsafe functions that cannot ever be executed if `T` does not permit
1089 /// zero-initialization: This will statically either panic, or do nothing.
1090 ///
1091 /// This intrinsic does not have a stable counterpart.
1092 #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
1093 #[rustc_safe_intrinsic]
1094 #[rustc_nounwind]
1095 pub fn assert_zero_valid<T>();
1096
1097 /// A guard for `std::mem::uninitialized`. This will statically either panic, or do nothing.
1098 ///
1099 /// This intrinsic does not have a stable counterpart.
1100 #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
1101 #[rustc_safe_intrinsic]
1102 #[rustc_nounwind]
1103 pub fn assert_mem_uninitialized_valid<T>();
1104
1105 /// Gets a reference to a static `Location` indicating where it was called.
1106 ///
1107 /// Note that, unlike most intrinsics, this is safe to call;
1108 /// it does not require an `unsafe` block.
1109 /// Therefore, implementations must not require the user to uphold
1110 /// any safety invariants.
1111 ///
1112 /// Consider using [`core::panic::Location::caller`] instead.
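///
/// A short sketch using [`core::panic::Location::caller`] together with
/// `#[track_caller]` (the `where_am_i_called` helper is illustrative only):
///
/// ```
/// use std::panic::Location;
///
/// #[track_caller]
/// fn where_am_i_called() -> &'static Location<'static> {
///     // Reports the caller's location because of `#[track_caller]`.
///     Location::caller()
/// }
///
/// let loc = where_am_i_called();
/// assert!(loc.line() > 0);
/// ```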
1113 #[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
1114 #[rustc_safe_intrinsic]
1115 #[rustc_nounwind]
1116 pub fn caller_location() -> &'static crate::panic::Location<'static>;
1117
1118 /// Moves a value out of scope without running drop glue.
1119 ///
1120 /// This exists solely for [`mem::forget_unsized`]; normal `forget` uses
1121 /// `ManuallyDrop` instead.
1122 ///
1123 /// Note that, unlike most intrinsics, this is safe to call;
1124 /// it does not require an `unsafe` block.
1125 /// Therefore, implementations must not require the user to uphold
1126 /// any safety invariants.
1127 #[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")]
1128 #[rustc_safe_intrinsic]
1129 #[rustc_nounwind]
1130 pub fn forget<T: ?Sized>(_: T);
1131
1132 /// Reinterprets the bits of a value of one type as another type.
1133 ///
1134 /// Both types must have the same size. Compilation will fail if this is not guaranteed.
1135 ///
1136 /// `transmute` is semantically equivalent to a bitwise move of one type
1137 /// into another. It copies the bits from the source value into the
1138 /// destination value, then forgets the original. Note that source and destination
1139 /// are passed by-value, which means if `Src` or `Dst` contain padding, that padding
1140 /// is *not* guaranteed to be preserved by `transmute`.
1141 ///
1142 /// Both the argument and the result must be [valid](../../nomicon/what-unsafe-does.html) at
1143 /// their given type. Violating this condition leads to [undefined behavior][ub]. The compiler
1144 /// will generate code *assuming that you, the programmer, ensure that there will never be
1145 /// undefined behavior*. It is therefore your responsibility to guarantee that every value
1146 /// passed to `transmute` is valid at both types `Src` and `Dst`. Failing to uphold this condition
1147 /// may lead to unexpected and unstable compilation results. This makes `transmute` **incredibly
1148 /// unsafe**. `transmute` should be the absolute last resort.
1149 ///
1150 /// Transmuting pointers to integers in a `const` context is [undefined behavior][ub].
1151 /// Any attempt to use the resulting value for integer operations will abort const-evaluation.
1152 /// (And even outside `const`, such transmutation is touching on many unspecified aspects of the
1153 /// Rust memory model and should be avoided. See below for alternatives.)
1154 ///
1155 /// Because `transmute` is a by-value operation, alignment of the *transmuted values
1156 /// themselves* is not a concern. As with any other function, the compiler already ensures
1157 /// both `Src` and `Dst` are properly aligned. However, when transmuting values that *point
1158 /// elsewhere* (such as pointers, references, boxes…), the caller has to ensure proper
1159 /// alignment of the pointed-to values.
1160 ///
1161 /// The [nomicon](../../nomicon/transmutes.html) has additional documentation.
1162 ///
1163 /// [ub]: ../../reference/behavior-considered-undefined.html
1164 ///
1165 /// # Examples
1166 ///
1167 /// There are a few things that `transmute` is really useful for.
1168 ///
1169 /// Turning a pointer into a function pointer. This is *not* portable to
1170 /// machines where function pointers and data pointers have different sizes.
1171 ///
1172 /// ```
1173 /// fn foo() -> i32 {
1174 /// 0
1175 /// }
1176 /// // Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
1177 /// // This avoids an integer-to-pointer `transmute`, which can be problematic.
1178 /// // Transmuting between raw pointers and function pointers (i.e., two pointer types) is fine.
1179 /// let pointer = foo as *const ();
1180 /// let function = unsafe {
1181 /// std::mem::transmute::<*const (), fn() -> i32>(pointer)
1182 /// };
1183 /// assert_eq!(function(), 0);
1184 /// ```
1185 ///
1186 /// Extending a lifetime, or shortening an invariant lifetime. This is
1187 /// advanced, very unsafe Rust!
1188 ///
1189 /// ```
1190 /// struct R<'a>(&'a i32);
1191 /// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> {
1192 /// std::mem::transmute::<R<'b>, R<'static>>(r)
1193 /// }
1194 ///
1195 /// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>)
1196 /// -> &'b mut R<'c> {
1197 /// std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)
1198 /// }
1199 /// ```
1200 ///
1201 /// # Alternatives
1202 ///
1203 /// Don't despair: many uses of `transmute` can be achieved through other means.
1204 /// Below are common applications of `transmute` which can be replaced with safer
1205 /// constructs.
1206 ///
1207 /// Turning raw bytes (`[u8; SZ]`) into `u32`, `f64`, etc.:
1208 ///
1209 /// ```
1210 /// let raw_bytes = [0x78, 0x56, 0x34, 0x12];
1211 ///
1212 /// let num = unsafe {
1213 /// std::mem::transmute::<[u8; 4], u32>(raw_bytes)
1214 /// };
1215 ///
1216 /// // use `u32::from_ne_bytes` instead
1217 /// let num = u32::from_ne_bytes(raw_bytes);
1218 /// // or use `u32::from_le_bytes` or `u32::from_be_bytes` to specify the endianness
1219 /// let num = u32::from_le_bytes(raw_bytes);
1220 /// assert_eq!(num, 0x12345678);
1221 /// let num = u32::from_be_bytes(raw_bytes);
1222 /// assert_eq!(num, 0x78563412);
1223 /// ```
1224 ///
1225 /// Turning a pointer into a `usize`:
1226 ///
1227 /// ```no_run
1228 /// let ptr = &0;
1229 /// let ptr_num_transmute = unsafe {
1230 /// std::mem::transmute::<&i32, usize>(ptr)
1231 /// };
1232 ///
1233 /// // Use an `as` cast instead
1234 /// let ptr_num_cast = ptr as *const i32 as usize;
1235 /// ```
1236 ///
1237 /// Note that using `transmute` to turn a pointer into a `usize` is (as noted above) [undefined
1238 /// behavior][ub] in `const` contexts. Also outside of consts, this operation might not behave
1239 /// as expected -- this is touching on many unspecified aspects of the Rust memory model.
1240 /// Depending on what the code is doing, the following alternatives are preferable to
1241 /// pointer-to-integer transmutation:
1242 /// - If the code just wants to store data of arbitrary type in some buffer and needs to pick a
1243 /// type for that buffer, it can use [`MaybeUninit`][mem::MaybeUninit].
1244 /// - If the code actually wants to work on the address the pointer points to, it can use `as`
1245 /// casts or [`ptr.addr()`][pointer::addr].
1246 ///
1247 /// Turning a `*mut T` into an `&mut T`:
1248 ///
1249 /// ```
1250 /// let ptr: *mut i32 = &mut 0;
1251 /// let ref_transmuted = unsafe {
1252 /// std::mem::transmute::<*mut i32, &mut i32>(ptr)
1253 /// };
1254 ///
1255 /// // Use a reborrow instead
1256 /// let ref_casted = unsafe { &mut *ptr };
1257 /// ```
1258 ///
1259 /// Turning an `&mut T` into an `&mut U`:
1260 ///
1261 /// ```
1262 /// let ptr = &mut 0;
1263 /// let val_transmuted = unsafe {
1264 /// std::mem::transmute::<&mut i32, &mut u32>(ptr)
1265 /// };
1266 ///
1267 /// // Now, put together `as` and reborrowing - note the chaining of `as`
1268 /// // `as` is not transitive
1269 /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) };
1270 /// ```
1271 ///
1272 /// Turning an `&str` into a `&[u8]`:
1273 ///
1274 /// ```
1275 /// // this is not a good way to do this.
1276 /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") };
1277 /// assert_eq!(slice, &[82, 117, 115, 116]);
1278 ///
1279 /// // You could use `str::as_bytes`
1280 /// let slice = "Rust".as_bytes();
1281 /// assert_eq!(slice, &[82, 117, 115, 116]);
1282 ///
1283 /// // Or, just use a byte string, if you have control over the string
1284 /// // literal
1285 /// assert_eq!(b"Rust", &[82, 117, 115, 116]);
1286 /// ```
1287 ///
1288 /// Turning a `Vec<&T>` into a `Vec<Option<&T>>`.
1289 ///
1290 /// To transmute the inner type of the contents of a container, you must make sure to not
1291 /// violate any of the container's invariants. For `Vec`, this means that both the size
1292 /// *and alignment* of the inner types have to match. Other containers might rely on the
1293 /// size of the type, alignment, or even the `TypeId`, in which case transmuting wouldn't
1294 /// be possible at all without violating the container invariants.
1295 ///
1296 /// ```
1297 /// let store = [0, 1, 2, 3];
1298 /// let v_orig = store.iter().collect::<Vec<&i32>>();
1299 ///
1300 /// // clone the vector as we will reuse them later
1301 /// let v_clone = v_orig.clone();
1302 ///
1303 /// // Using transmute: this relies on the unspecified data layout of `Vec`, which is a
1304 /// // bad idea and could cause Undefined Behavior.
1305 /// // However, it is no-copy.
1306 /// let v_transmuted = unsafe {
1307 /// std::mem::transmute::<Vec<&i32>, Vec<Option<&i32>>>(v_clone)
1308 /// };
1309 ///
1310 /// let v_clone = v_orig.clone();
1311 ///
1312 /// // This is the suggested, safe way.
1313 /// // It does copy the entire vector, though, into a new array.
1314 /// let v_collected = v_clone.into_iter()
1315 /// .map(Some)
1316 /// .collect::<Vec<Option<&i32>>>();
1317 ///
1318 /// let v_clone = v_orig.clone();
1319 ///
1320 /// // This is the proper no-copy, unsafe way of "transmuting" a `Vec`, without relying on the
1321 /// // data layout. Instead of literally calling `transmute`, we perform a pointer cast, but
1322 /// // in terms of converting the original inner type (`&i32`) to the new one (`Option<&i32>`),
1323 /// // this has all the same caveats. Besides the information provided above, also consult the
1324 /// // [`from_raw_parts`] documentation.
1325 /// let v_from_raw = unsafe {
1326 // FIXME Update this when vec_into_raw_parts is stabilized
1327 /// // Ensure the original vector is not dropped.
1328 /// let mut v_clone = std::mem::ManuallyDrop::new(v_clone);
1329 /// Vec::from_raw_parts(v_clone.as_mut_ptr() as *mut Option<&i32>,
1330 /// v_clone.len(),
1331 /// v_clone.capacity())
1332 /// };
1333 /// ```
1334 ///
1335 /// [`from_raw_parts`]: ../../std/vec/struct.Vec.html#method.from_raw_parts
1336 ///
1337 /// Implementing `split_at_mut`:
1338 ///
1339 /// ```
1340 /// use std::{slice, mem};
1341 ///
1342 /// // There are multiple ways to do this, and there are multiple problems
1343 /// // with the following (transmute) way.
1344 /// fn split_at_mut_transmute<T>(slice: &mut [T], mid: usize)
1345 /// -> (&mut [T], &mut [T]) {
1346 /// let len = slice.len();
1347 /// assert!(mid <= len);
1348 /// unsafe {
1349 /// let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice);
1350 /// // first: transmute is not type safe; all it checks is that T and
1351 /// // U are of the same size. Second, right here, you have two
1352 /// // mutable references pointing to the same memory.
1353 /// (&mut slice[0..mid], &mut slice2[mid..len])
1354 /// }
1355 /// }
1356 ///
1357 /// // This gets rid of the type safety problems; `&mut *` will *only* give
1358 /// // you an `&mut T` from an `&mut T` or `*mut T`.
1359 /// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize)
1360 /// -> (&mut [T], &mut [T]) {
1361 /// let len = slice.len();
1362 /// assert!(mid <= len);
1363 /// unsafe {
1364 /// let slice2 = &mut *(slice as *mut [T]);
1365 /// // however, you still have two mutable references pointing to
1366 /// // the same memory.
1367 /// (&mut slice[0..mid], &mut slice2[mid..len])
1368 /// }
1369 /// }
1370 ///
1371 /// // This is how the standard library does it. This is the best method, if
1372 /// // you need to do something like this
1373 /// fn split_at_stdlib<T>(slice: &mut [T], mid: usize)
1374 /// -> (&mut [T], &mut [T]) {
1375 /// let len = slice.len();
1376 /// assert!(mid <= len);
1377 /// unsafe {
1378 /// let ptr = slice.as_mut_ptr();
1379 /// // This now has three mutable references pointing at the same
1380 /// // memory. `slice`, the rvalue ret.0, and the rvalue ret.1.
1381 /// // `slice` is never used after `let ptr = ...`, and so one can
1382 /// // treat it as "dead", and therefore, you only have two real
1383 /// // mutable slices.
1384 /// (slice::from_raw_parts_mut(ptr, mid),
1385 /// slice::from_raw_parts_mut(ptr.add(mid), len - mid))
1386 /// }
1387 /// }
1388 /// ```
1389 #[stable(feature = "rust1", since = "1.0.0")]
1390 #[rustc_allowed_through_unstable_modules]
1391 #[rustc_const_stable(feature = "const_transmute", since = "1.56.0")]
1392 #[rustc_diagnostic_item = "transmute"]
1393 #[rustc_nounwind]
1394 pub fn transmute<Src, Dst>(src: Src) -> Dst;
1395
1396 /// Like [`transmute`], but even less checked at compile-time: rather than
1397 /// giving an error for `size_of::<Src>() != size_of::<Dst>()`, it's
1398 /// **Undefined Behaviour** at runtime.
1399 ///
1400 /// Prefer normal `transmute` where possible, for the extra checking, since
1401 /// both do exactly the same thing at runtime, if they both compile.
1402 ///
1403 /// This is not expected to ever be exposed directly to users, rather it
1404 /// may eventually be exposed through some more-constrained API.
1405 #[rustc_const_stable(feature = "const_transmute", since = "1.56.0")]
1406 #[rustc_nounwind]
1407 pub fn transmute_unchecked<Src, Dst>(src: Src) -> Dst;
1408
1409 /// Returns `true` if the actual type given as `T` requires drop
1410 /// glue; returns `false` if the actual type provided for `T`
1411 /// implements `Copy`.
1412 ///
1413 /// If the actual type neither requires drop glue nor implements
1414 /// `Copy`, then the return value of this function is unspecified.
1415 ///
1416 /// Note that, unlike most intrinsics, this is safe to call;
1417 /// it does not require an `unsafe` block.
1418 /// Therefore, implementations must not require the user to uphold
1419 /// any safety invariants.
1420 ///
1421 /// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
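///
/// Using the stabilized counterpart, for instance:
///
/// ```
/// // `String` owns heap memory and therefore has drop glue; `u32` does not.
/// assert!(std::mem::needs_drop::<String>());
/// assert!(!std::mem::needs_drop::<u32>());
/// ```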
1422 #[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
1423 #[rustc_safe_intrinsic]
1424 #[rustc_nounwind]
1425 pub fn needs_drop<T: ?Sized>() -> bool;
1426
1427 /// Calculates the offset from a pointer.
1428 ///
1429 /// This is implemented as an intrinsic to avoid converting to and from an
1430 /// integer, since the conversion would throw away aliasing information.
1431 ///
1432 /// This can only be used with `Ptr` as a raw pointer type (`*mut` or `*const`)
1433 /// to a `Sized` pointee and with `Delta` as `usize` or `isize`. Any other
1434 /// instantiations may arbitrarily misbehave, and that's *not* a compiler bug.
1435 ///
1436 /// # Safety
1437 ///
1438 /// Both the starting and resulting pointer must be either in bounds or one
1439 /// byte past the end of an allocated object. If either pointer is out of
1440 /// bounds or arithmetic overflow occurs then any further use of the
1441 /// returned value will result in undefined behavior.
1442 ///
1443 /// The stabilized version of this intrinsic is [`pointer::offset`].
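///
/// A minimal sketch using the stabilized [`pointer::offset`]:
///
/// ```
/// let a = [1u8, 2, 3, 4];
/// let ptr = a.as_ptr();
/// // SAFETY: both the starting and the resulting pointer are in bounds of `a`.
/// let third = unsafe { *ptr.offset(2) };
/// assert_eq!(third, 3);
/// ```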
1444 #[must_use = "returns a new pointer rather than modifying its argument"]
1445 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1446 #[rustc_nounwind]
1447 pub fn offset<Ptr, Delta>(dst: Ptr, offset: Delta) -> Ptr;
1448
1449 /// Calculates the offset from a pointer, potentially wrapping.
1450 ///
1451 /// This is implemented as an intrinsic to avoid converting to and from an
1452 /// integer, since the conversion inhibits certain optimizations.
1453 ///
1454 /// # Safety
1455 ///
1456 /// Unlike the `offset` intrinsic, this intrinsic does not restrict the
1457 /// resulting pointer to point into or one byte past the end of an allocated
1458 /// object, and it wraps with two's complement arithmetic. The resulting
1459 /// value is not necessarily valid to be used to actually access memory.
1460 ///
1461 /// The stabilized version of this intrinsic is [`pointer::wrapping_offset`].
1462 #[must_use = "returns a new pointer rather than modifying its argument"]
1463 #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
1464 #[rustc_nounwind]
1465 pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
1466
1467 /// Masks out bits of the pointer according to a mask.
1468 ///
1469 /// Note that, unlike most intrinsics, this is safe to call;
1470 /// it does not require an `unsafe` block.
1471 /// Therefore, implementations must not require the user to uphold
1472 /// any safety invariants.
1473 ///
1474 /// Consider using [`pointer::mask`] instead.
1475 #[rustc_safe_intrinsic]
1476 #[rustc_nounwind]
1477 pub fn ptr_mask<T>(ptr: *const T, mask: usize) -> *const T;
1478
1479 /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
1480 /// a size of `count` * `size_of::<T>()` and an alignment of
1481 /// `min_align_of::<T>()`
1482 ///
1483 /// The volatile parameter is set to `true`, so it will not be optimized out
1484 /// unless size is equal to zero.
1485 ///
1486 /// This intrinsic does not have a stable counterpart.
1487 #[rustc_nounwind]
1488 pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: usize);
1489 /// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with
1490 /// a size of `count * size_of::<T>()` and an alignment of
1491 /// `min_align_of::<T>()`
1492 ///
1493 /// The volatile parameter is set to `true`, so it will not be optimized out
1494 /// unless size is equal to zero.
1495 ///
1496 /// This intrinsic does not have a stable counterpart.
1497 #[rustc_nounwind]
1498 pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
1499 /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
1500 /// size of `count * size_of::<T>()` and an alignment of
1501 /// `min_align_of::<T>()`.
1502 ///
1503 /// The volatile parameter is set to `true`, so it will not be optimized out
1504 /// unless size is equal to zero.
1505 ///
1506 /// This intrinsic does not have a stable counterpart.
1507 #[rustc_nounwind]
1508 pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);
1509
1510 /// Performs a volatile load from the `src` pointer.
1511 ///
1512 /// The stabilized version of this intrinsic is [`core::ptr::read_volatile`].
1513 #[rustc_nounwind]
1514 pub fn volatile_load<T>(src: *const T) -> T;
1515 /// Performs a volatile store to the `dst` pointer.
1516 ///
1517 /// The stabilized version of this intrinsic is [`core::ptr::write_volatile`].
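///
/// A minimal sketch of the stabilized counterparts, [`core::ptr::read_volatile`]
/// and [`core::ptr::write_volatile`]:
///
/// ```
/// let mut value = 0u32;
/// let ptr = &mut value as *mut u32;
/// // SAFETY: `ptr` is valid, properly aligned, and points to initialized memory.
/// unsafe {
///     std::ptr::write_volatile(ptr, 42);
///     assert_eq!(std::ptr::read_volatile(ptr), 42);
/// }
/// ```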
1518 #[rustc_nounwind]
1519 pub fn volatile_store<T>(dst: *mut T, val: T);
1520
1521 /// Performs a volatile load from the `src` pointer
1522 /// The pointer is not required to be aligned.
1523 ///
1524 /// This intrinsic does not have a stable counterpart.
1525 #[rustc_nounwind]
1526 pub fn unaligned_volatile_load<T>(src: *const T) -> T;
1527 /// Performs a volatile store to the `dst` pointer.
1528 /// The pointer is not required to be aligned.
1529 ///
1530 /// This intrinsic does not have a stable counterpart.
1531 #[rustc_nounwind]
1532 pub fn unaligned_volatile_store<T>(dst: *mut T, val: T);
1533
1534 /// Returns the square root of an `f32`
1535 ///
1536 /// The stabilized version of this intrinsic is
1537 /// [`f32::sqrt`](../../std/primitive.f32.html#method.sqrt)
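///
/// For instance, using the stabilized method:
///
/// ```
/// // 4.0 has an exact square root, so an exact comparison is fine here.
/// assert_eq!(4.0_f32.sqrt(), 2.0);
/// ```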
1538 #[rustc_nounwind]
1539 pub fn sqrtf32(x: f32) -> f32;
1540 /// Returns the square root of an `f64`
1541 ///
1542 /// The stabilized version of this intrinsic is
1543 /// [`f64::sqrt`](../../std/primitive.f64.html#method.sqrt)
1544 #[rustc_nounwind]
1545 pub fn sqrtf64(x: f64) -> f64;
1546
1547 /// Raises an `f32` to an integer power.
1548 ///
1549 /// The stabilized version of this intrinsic is
1550 /// [`f32::powi`](../../std/primitive.f32.html#method.powi)
1551 #[rustc_nounwind]
1552 pub fn powif32(a: f32, x: i32) -> f32;
1553 /// Raises an `f64` to an integer power.
1554 ///
1555 /// The stabilized version of this intrinsic is
1556 /// [`f64::powi`](../../std/primitive.f64.html#method.powi)
1557 #[rustc_nounwind]
1558 pub fn powif64(a: f64, x: i32) -> f64;
1559
1560 /// Returns the sine of an `f32`.
1561 ///
1562 /// The stabilized version of this intrinsic is
1563 /// [`f32::sin`](../../std/primitive.f32.html#method.sin)
1564 #[rustc_nounwind]
1565 pub fn sinf32(x: f32) -> f32;
1566 /// Returns the sine of an `f64`.
1567 ///
1568 /// The stabilized version of this intrinsic is
1569 /// [`f64::sin`](../../std/primitive.f64.html#method.sin)
1570 #[rustc_nounwind]
1571 pub fn sinf64(x: f64) -> f64;
1572
1573 /// Returns the cosine of an `f32`.
1574 ///
1575 /// The stabilized version of this intrinsic is
1576 /// [`f32::cos`](../../std/primitive.f32.html#method.cos)
1577 #[rustc_nounwind]
1578 pub fn cosf32(x: f32) -> f32;
1579 /// Returns the cosine of an `f64`.
1580 ///
1581 /// The stabilized version of this intrinsic is
1582 /// [`f64::cos`](../../std/primitive.f64.html#method.cos)
1583 #[rustc_nounwind]
1584 pub fn cosf64(x: f64) -> f64;
1585
1586 /// Raises an `f32` to an `f32` power.
1587 ///
1588 /// The stabilized version of this intrinsic is
1589 /// [`f32::powf`](../../std/primitive.f32.html#method.powf)
1590 #[rustc_nounwind]
1591 pub fn powf32(a: f32, x: f32) -> f32;
1592 /// Raises an `f64` to an `f64` power.
1593 ///
1594 /// The stabilized version of this intrinsic is
1595 /// [`f64::powf`](../../std/primitive.f64.html#method.powf)
1596 #[rustc_nounwind]
1597 pub fn powf64(a: f64, x: f64) -> f64;
1598
1599 /// Returns the exponential of an `f32`.
1600 ///
1601 /// The stabilized version of this intrinsic is
1602 /// [`f32::exp`](../../std/primitive.f32.html#method.exp)
1603 #[rustc_nounwind]
1604 pub fn expf32(x: f32) -> f32;
1605 /// Returns the exponential of an `f64`.
1606 ///
1607 /// The stabilized version of this intrinsic is
1608 /// [`f64::exp`](../../std/primitive.f64.html#method.exp)
1609 #[rustc_nounwind]
1610 pub fn expf64(x: f64) -> f64;
1611
1612 /// Returns 2 raised to the power of an `f32`.
1613 ///
1614 /// The stabilized version of this intrinsic is
1615 /// [`f32::exp2`](../../std/primitive.f32.html#method.exp2)
1616 #[rustc_nounwind]
1617 pub fn exp2f32(x: f32) -> f32;
1618 /// Returns 2 raised to the power of an `f64`.
1619 ///
1620 /// The stabilized version of this intrinsic is
1621 /// [`f64::exp2`](../../std/primitive.f64.html#method.exp2)
1622 #[rustc_nounwind]
1623 pub fn exp2f64(x: f64) -> f64;
1624
1625 /// Returns the natural logarithm of an `f32`.
1626 ///
1627 /// The stabilized version of this intrinsic is
1628 /// [`f32::ln`](../../std/primitive.f32.html#method.ln)
1629 #[rustc_nounwind]
1630 pub fn logf32(x: f32) -> f32;
1631 /// Returns the natural logarithm of an `f64`.
1632 ///
1633 /// The stabilized version of this intrinsic is
1634 /// [`f64::ln`](../../std/primitive.f64.html#method.ln)
1635 #[rustc_nounwind]
1636 pub fn logf64(x: f64) -> f64;
1637
1638 /// Returns the base 10 logarithm of an `f32`.
1639 ///
1640 /// The stabilized version of this intrinsic is
1641 /// [`f32::log10`](../../std/primitive.f32.html#method.log10)
1642 #[rustc_nounwind]
1643 pub fn log10f32(x: f32) -> f32;
1644 /// Returns the base 10 logarithm of an `f64`.
1645 ///
1646 /// The stabilized version of this intrinsic is
1647 /// [`f64::log10`](../../std/primitive.f64.html#method.log10)
1648 #[rustc_nounwind]
1649 pub fn log10f64(x: f64) -> f64;
1650
1651 /// Returns the base 2 logarithm of an `f32`.
1652 ///
1653 /// The stabilized version of this intrinsic is
1654 /// [`f32::log2`](../../std/primitive.f32.html#method.log2)
1655 #[rustc_nounwind]
1656 pub fn log2f32(x: f32) -> f32;
1657 /// Returns the base 2 logarithm of an `f64`.
1658 ///
1659 /// The stabilized version of this intrinsic is
1660 /// [`f64::log2`](../../std/primitive.f64.html#method.log2)
1661 #[rustc_nounwind]
1662 pub fn log2f64(x: f64) -> f64;
1663
1664 /// Returns `a * b + c` for `f32` values.
1665 ///
1666 /// The stabilized version of this intrinsic is
1667 /// [`f32::mul_add`](../../std/primitive.f32.html#method.mul_add)
1668 #[rustc_nounwind]
1669 pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
1670 /// Returns `a * b + c` for `f64` values.
1671 ///
1672 /// The stabilized version of this intrinsic is
1673 /// [`f64::mul_add`](../../std/primitive.f64.html#method.mul_add)
1674 #[rustc_nounwind]
1675 pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
1676
1677 /// Returns the absolute value of an `f32`.
1678 ///
1679 /// The stabilized version of this intrinsic is
1680 /// [`f32::abs`](../../std/primitive.f32.html#method.abs)
1681 #[rustc_nounwind]
1682 pub fn fabsf32(x: f32) -> f32;
1683 /// Returns the absolute value of an `f64`.
1684 ///
1685 /// The stabilized version of this intrinsic is
1686 /// [`f64::abs`](../../std/primitive.f64.html#method.abs)
1687 #[rustc_nounwind]
1688 pub fn fabsf64(x: f64) -> f64;
1689
1690 /// Returns the minimum of two `f32` values.
1691 ///
1692 /// Note that, unlike most intrinsics, this is safe to call;
1693 /// it does not require an `unsafe` block.
1694 /// Therefore, implementations must not require the user to uphold
1695 /// any safety invariants.
1696 ///
1697 /// The stabilized version of this intrinsic is
1698 /// [`f32::min`]
1699 #[rustc_safe_intrinsic]
1700 #[rustc_nounwind]
1701 pub fn minnumf32(x: f32, y: f32) -> f32;
1702 /// Returns the minimum of two `f64` values.
1703 ///
1704 /// Note that, unlike most intrinsics, this is safe to call;
1705 /// it does not require an `unsafe` block.
1706 /// Therefore, implementations must not require the user to uphold
1707 /// any safety invariants.
1708 ///
1709 /// The stabilized version of this intrinsic is
1710 /// [`f64::min`]
1711 #[rustc_safe_intrinsic]
1712 #[rustc_nounwind]
1713 pub fn minnumf64(x: f64, y: f64) -> f64;
1714 /// Returns the maximum of two `f32` values.
1715 ///
1716 /// Note that, unlike most intrinsics, this is safe to call;
1717 /// it does not require an `unsafe` block.
1718 /// Therefore, implementations must not require the user to uphold
1719 /// any safety invariants.
1720 ///
1721 /// The stabilized version of this intrinsic is
1722 /// [`f32::max`]
1723 #[rustc_safe_intrinsic]
1724 #[rustc_nounwind]
1725 pub fn maxnumf32(x: f32, y: f32) -> f32;
1726 /// Returns the maximum of two `f64` values.
1727 ///
1728 /// Note that, unlike most intrinsics, this is safe to call;
1729 /// it does not require an `unsafe` block.
1730 /// Therefore, implementations must not require the user to uphold
1731 /// any safety invariants.
1732 ///
1733 /// The stabilized version of this intrinsic is
1734 /// [`f64::max`]
1735 #[rustc_safe_intrinsic]
1736 #[rustc_nounwind]
1737 pub fn maxnumf64(x: f64, y: f64) -> f64;
1738
1739 /// Copies the sign from `y` to `x` for `f32` values.
1740 ///
1741 /// The stabilized version of this intrinsic is
1742 /// [`f32::copysign`](../../std/primitive.f32.html#method.copysign)
1743 #[rustc_nounwind]
1744 pub fn copysignf32(x: f32, y: f32) -> f32;
1745 /// Copies the sign from `y` to `x` for `f64` values.
1746 ///
1747 /// The stabilized version of this intrinsic is
1748 /// [`f64::copysign`](../../std/primitive.f64.html#method.copysign)
1749 #[rustc_nounwind]
1750 pub fn copysignf64(x: f64, y: f64) -> f64;
1751
1752 /// Returns the largest integer less than or equal to an `f32`.
1753 ///
1754 /// The stabilized version of this intrinsic is
1755 /// [`f32::floor`](../../std/primitive.f32.html#method.floor)
1756 #[rustc_nounwind]
1757 pub fn floorf32(x: f32) -> f32;
1758 /// Returns the largest integer less than or equal to an `f64`.
1759 ///
1760 /// The stabilized version of this intrinsic is
1761 /// [`f64::floor`](../../std/primitive.f64.html#method.floor)
1762 #[rustc_nounwind]
1763 pub fn floorf64(x: f64) -> f64;
1764
1765 /// Returns the smallest integer greater than or equal to an `f32`.
1766 ///
1767 /// The stabilized version of this intrinsic is
1768 /// [`f32::ceil`](../../std/primitive.f32.html#method.ceil)
1769 #[rustc_nounwind]
1770 pub fn ceilf32(x: f32) -> f32;
1771 /// Returns the smallest integer greater than or equal to an `f64`.
1772 ///
1773 /// The stabilized version of this intrinsic is
1774 /// [`f64::ceil`](../../std/primitive.f64.html#method.ceil)
1775 #[rustc_nounwind]
1776 pub fn ceilf64(x: f64) -> f64;
1777
1778 /// Returns the integer part of an `f32`.
1779 ///
1780 /// The stabilized version of this intrinsic is
1781 /// [`f32::trunc`](../../std/primitive.f32.html#method.trunc)
1782 #[rustc_nounwind]
1783 pub fn truncf32(x: f32) -> f32;
1784 /// Returns the integer part of an `f64`.
1785 ///
1786 /// The stabilized version of this intrinsic is
1787 /// [`f64::trunc`](../../std/primitive.f64.html#method.trunc)
1788 #[rustc_nounwind]
1789 pub fn truncf64(x: f64) -> f64;
1790
1791 /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
1792 /// if the argument is not an integer.
1793 ///
1794 /// The stabilized version of this intrinsic is
1795 /// [`f32::round_ties_even`](../../std/primitive.f32.html#method.round_ties_even)
1796 #[rustc_nounwind]
1797 pub fn rintf32(x: f32) -> f32;
1798 /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
1799 /// if the argument is not an integer.
1800 ///
1801 /// The stabilized version of this intrinsic is
1802 /// [`f64::round_ties_even`](../../std/primitive.f64.html#method.round_ties_even)
1803 #[rustc_nounwind]
1804 pub fn rintf64(x: f64) -> f64;
1805
1806 /// Returns the nearest integer to an `f32`.
1807 ///
1808 /// This intrinsic does not have a stable counterpart.
1809 #[rustc_nounwind]
1810 pub fn nearbyintf32(x: f32) -> f32;
1811 /// Returns the nearest integer to an `f64`.
1812 ///
1813 /// This intrinsic does not have a stable counterpart.
1814 #[rustc_nounwind]
1815 pub fn nearbyintf64(x: f64) -> f64;
1816
1817 /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
1818 ///
1819 /// The stabilized version of this intrinsic is
1820 /// [`f32::round`](../../std/primitive.f32.html#method.round)
1821 #[rustc_nounwind]
1822 pub fn roundf32(x: f32) -> f32;
1823 /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
1824 ///
1825 /// The stabilized version of this intrinsic is
1826 /// [`f64::round`](../../std/primitive.f64.html#method.round)
1827 #[rustc_nounwind]
1828 pub fn roundf64(x: f64) -> f64;
1829
1830 /// Returns the nearest integer to an `f32`. Rounds half-way cases to the number
1831 /// with an even least significant digit.
1832 ///
1833 /// This intrinsic does not have a stable counterpart.
1834 #[rustc_nounwind]
1835 pub fn roundevenf32(x: f32) -> f32;
1836 /// Returns the nearest integer to an `f64`. Rounds half-way cases to the number
1837 /// with an even least significant digit.
1838 ///
1839 /// This intrinsic does not have a stable counterpart.
1840 #[rustc_nounwind]
1841 pub fn roundevenf64(x: f64) -> f64;
1842
1843 /// Float addition that allows optimizations based on algebraic rules.
1844 /// May assume inputs are finite.
1845 ///
1846 /// This intrinsic does not have a stable counterpart.
1847 #[rustc_nounwind]
1848 pub fn fadd_fast<T: Copy>(a: T, b: T) -> T;
1849
1850 /// Float subtraction that allows optimizations based on algebraic rules.
1851 /// May assume inputs are finite.
1852 ///
1853 /// This intrinsic does not have a stable counterpart.
1854 #[rustc_nounwind]
1855 pub fn fsub_fast<T: Copy>(a: T, b: T) -> T;
1856
1857 /// Float multiplication that allows optimizations based on algebraic rules.
1858 /// May assume inputs are finite.
1859 ///
1860 /// This intrinsic does not have a stable counterpart.
1861 #[rustc_nounwind]
1862 pub fn fmul_fast<T: Copy>(a: T, b: T) -> T;
1863
1864 /// Float division that allows optimizations based on algebraic rules.
1865 /// May assume inputs are finite.
1866 ///
1867 /// This intrinsic does not have a stable counterpart.
1868 #[rustc_nounwind]
1869 pub fn fdiv_fast<T: Copy>(a: T, b: T) -> T;
1870
1871 /// Float remainder that allows optimizations based on algebraic rules.
1872 /// May assume inputs are finite.
1873 ///
1874 /// This intrinsic does not have a stable counterpart.
1875 #[rustc_nounwind]
1876 pub fn frem_fast<T: Copy>(a: T, b: T) -> T;
1877
1878 /// Convert with LLVM’s fptoui/fptosi, which may return undef for values out of range
1879 /// (<https://github.com/rust-lang/rust/issues/10184>)
1880 ///
1881 /// Stabilized as [`f32::to_int_unchecked`] and [`f64::to_int_unchecked`].
1882 #[rustc_nounwind]
1883 pub fn float_to_int_unchecked<Float: Copy, Int: Copy>(value: Float) -> Int;
1884
1885 /// Returns the number of bits set in an integer type `T`
1886 ///
1887 /// Note that, unlike most intrinsics, this is safe to call;
1888 /// it does not require an `unsafe` block.
1889 /// Therefore, implementations must not require the user to uphold
1890 /// any safety invariants.
1891 ///
1892 /// The stabilized versions of this intrinsic are available on the integer
1893 /// primitives via the `count_ones` method. For example,
1894 /// [`u32::count_ones`]
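///
/// A one-line illustration using the stabilized method:
///
/// ```
/// // Four bits are set in `0b0110_1001`.
/// assert_eq!(0b0110_1001_u8.count_ones(), 4);
/// ```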
1895 #[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
1896 #[rustc_safe_intrinsic]
1897 #[rustc_nounwind]
1898 pub fn ctpop<T: Copy>(x: T) -> T;
1899
1900 /// Returns the number of leading unset bits (zeroes) in an integer type `T`.
1901 ///
1902 /// Note that, unlike most intrinsics, this is safe to call;
1903 /// it does not require an `unsafe` block.
1904 /// Therefore, implementations must not require the user to uphold
1905 /// any safety invariants.
1906 ///
1907 /// The stabilized versions of this intrinsic are available on the integer
1908 /// primitives via the `leading_zeros` method. For example,
1909 /// [`u32::leading_zeros`]
1910 ///
1911 /// # Examples
1912 ///
1913 /// ```
1914 /// #![feature(core_intrinsics)]
1915 ///
1916 /// use std::intrinsics::ctlz;
1917 ///
1918 /// let x = 0b0001_1100_u8;
1919 /// let num_leading = ctlz(x);
1920 /// assert_eq!(num_leading, 3);
1921 /// ```
1922 ///
1923 /// An `x` with value `0` will return the bit width of `T`.
1924 ///
1925 /// ```
1926 /// #![feature(core_intrinsics)]
1927 ///
1928 /// use std::intrinsics::ctlz;
1929 ///
1930 /// let x = 0u16;
1931 /// let num_leading = ctlz(x);
1932 /// assert_eq!(num_leading, 16);
1933 /// ```
1934 #[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
1935 #[rustc_safe_intrinsic]
1936 #[rustc_nounwind]
1937 pub fn ctlz<T: Copy>(x: T) -> T;
1938
1939 /// Like `ctlz`, but extra-unsafe as it returns `undef` when
1940 /// given an `x` with value `0`.
1941 ///
1942 /// This intrinsic does not have a stable counterpart.
1943 ///
1944 /// # Examples
1945 ///
1946 /// ```
1947 /// #![feature(core_intrinsics)]
1948 ///
1949 /// use std::intrinsics::ctlz_nonzero;
1950 ///
1951 /// let x = 0b0001_1100_u8;
1952 /// let num_leading = unsafe { ctlz_nonzero(x) };
1953 /// assert_eq!(num_leading, 3);
1954 /// ```
1955 #[rustc_const_stable(feature = "constctlz", since = "1.50.0")]
1956 #[rustc_nounwind]
1957 pub fn ctlz_nonzero<T: Copy>(x: T) -> T;
1958
1959 /// Returns the number of trailing unset bits (zeroes) in an integer type `T`.
1960 ///
1961 /// Note that, unlike most intrinsics, this is safe to call;
1962 /// it does not require an `unsafe` block.
1963 /// Therefore, implementations must not require the user to uphold
1964 /// any safety invariants.
1965 ///
1966 /// The stabilized versions of this intrinsic are available on the integer
1967 /// primitives via the `trailing_zeros` method. For example,
1968 /// [`u32::trailing_zeros`]
1969 ///
1970 /// # Examples
1971 ///
1972 /// ```
1973 /// #![feature(core_intrinsics)]
1974 ///
1975 /// use std::intrinsics::cttz;
1976 ///
1977 /// let x = 0b0011_1000_u8;
1978 /// let num_trailing = cttz(x);
1979 /// assert_eq!(num_trailing, 3);
1980 /// ```
1981 ///
1982 /// An `x` with value `0` will return the bit width of `T`:
1983 ///
1984 /// ```
1985 /// #![feature(core_intrinsics)]
1986 ///
1987 /// use std::intrinsics::cttz;
1988 ///
1989 /// let x = 0u16;
1990 /// let num_trailing = cttz(x);
1991 /// assert_eq!(num_trailing, 16);
1992 /// ```
1993 #[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
1994 #[rustc_safe_intrinsic]
1995 #[rustc_nounwind]
1996 pub fn cttz<T: Copy>(x: T) -> T;
1997
1998 /// Like `cttz`, but extra-unsafe as it returns `undef` when
1999 /// given an `x` with value `0`.
2000 ///
2001 /// This intrinsic does not have a stable counterpart.
2002 ///
2003 /// # Examples
2004 ///
2005 /// ```
2006 /// #![feature(core_intrinsics)]
2007 ///
2008 /// use std::intrinsics::cttz_nonzero;
2009 ///
2010 /// let x = 0b0011_1000_u8;
2011 /// let num_trailing = unsafe { cttz_nonzero(x) };
2012 /// assert_eq!(num_trailing, 3);
2013 /// ```
2014 #[rustc_const_stable(feature = "const_cttz_nonzero", since = "1.53.0")]
2015 #[rustc_nounwind]
2016 pub fn cttz_nonzero<T: Copy>(x: T) -> T;
2017
2018 /// Reverses the bytes in an integer type `T`.
2019 ///
2020 /// Note that, unlike most intrinsics, this is safe to call;
2021 /// it does not require an `unsafe` block.
2022 /// Therefore, implementations must not require the user to uphold
2023 /// any safety invariants.
2024 ///
2025 /// The stabilized versions of this intrinsic are available on the integer
2026 /// primitives via the `swap_bytes` method. For example,
2027 /// [`u32::swap_bytes`]
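///
/// For instance:
///
/// ```
/// // Byte order is reversed; bit order within each byte is preserved.
/// assert_eq!(0x12345678_u32.swap_bytes(), 0x78563412);
/// ```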
2028 #[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
2029 #[rustc_safe_intrinsic]
2030 #[rustc_nounwind]
2031 pub fn bswap<T: Copy>(x: T) -> T;
2032
2033 /// Reverses the bits in an integer type `T`.
2034 ///
2035 /// Note that, unlike most intrinsics, this is safe to call;
2036 /// it does not require an `unsafe` block.
2037 /// Therefore, implementations must not require the user to uphold
2038 /// any safety invariants.
2039 ///
2040 /// The stabilized versions of this intrinsic are available on the integer
2041 /// primitives via the `reverse_bits` method. For example,
2042 /// [`u32::reverse_bits`]
2043 #[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
2044 #[rustc_safe_intrinsic]
2045 #[rustc_nounwind]
2046 pub fn bitreverse<T: Copy>(x: T) -> T;
2047
2048 /// Performs checked integer addition.
2049 ///
2050 /// Note that, unlike most intrinsics, this is safe to call;
2051 /// it does not require an `unsafe` block.
2052 /// Therefore, implementations must not require the user to uphold
2053 /// any safety invariants.
2054 ///
2055 /// The stabilized versions of this intrinsic are available on the integer
2056 /// primitives via the `overflowing_add` method. For example,
2057 /// [`u32::overflowing_add`]
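///
/// An illustration using the stabilized method:
///
/// ```
/// // The first field is the wrapped result; the second flags whether overflow occurred.
/// assert_eq!(5_u32.overflowing_add(2), (7, false));
/// assert_eq!(u8::MAX.overflowing_add(1), (0, true));
/// ```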
2058 #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
2059 #[rustc_safe_intrinsic]
2060 #[rustc_nounwind]
2061 pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
2062
2063 /// Performs checked integer subtraction
2064 ///
2065 /// Note that, unlike most intrinsics, this is safe to call;
2066 /// it does not require an `unsafe` block.
2067 /// Therefore, implementations must not require the user to uphold
2068 /// any safety invariants.
2069 ///
2070 /// The stabilized versions of this intrinsic are available on the integer
2071 /// primitives via the `overflowing_sub` method. For example,
2072 /// [`u32::overflowing_sub`]
2073 #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
2074 #[rustc_safe_intrinsic]
2075 #[rustc_nounwind]
2076 pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
2077
2078 /// Performs checked integer multiplication
2079 ///
2080 /// Note that, unlike most intrinsics, this is safe to call;
2081 /// it does not require an `unsafe` block.
2082 /// Therefore, implementations must not require the user to uphold
2083 /// any safety invariants.
2084 ///
2085 /// The stabilized versions of this intrinsic are available on the integer
2086 /// primitives via the `overflowing_mul` method. For example,
2087 /// [`u32::overflowing_mul`]
2088 #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
2089 #[rustc_safe_intrinsic]
2090 #[rustc_nounwind]
2091 pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
2092
2093 /// Performs an exact division, resulting in undefined behavior where
2094 /// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`
2095 ///
2096 /// This intrinsic does not have a stable counterpart.
2097 #[rustc_const_unstable(feature = "const_exact_div", issue = "none")]
2098 #[rustc_nounwind]
2099 pub fn exact_div<T: Copy>(x: T, y: T) -> T;
2100
2101 /// Performs an unchecked division, resulting in undefined behavior
2102 /// where `y == 0` or `x == T::MIN && y == -1`
2103 ///
2104 /// Safe wrappers for this intrinsic are available on the integer
2105 /// primitives via the `checked_div` method. For example,
2106 /// [`u32::checked_div`]
2107 #[rustc_const_stable(feature = "const_int_unchecked_div", since = "1.52.0")]
2108 #[rustc_nounwind]
2109 pub fn unchecked_div<T: Copy>(x: T, y: T) -> T;
2110 /// Returns the remainder of an unchecked division, resulting in
2111 /// undefined behavior when `y == 0` or `x == T::MIN && y == -1`
2112 ///
2113 /// Safe wrappers for this intrinsic are available on the integer
2114 /// primitives via the `checked_rem` method. For example,
2115 /// [`u32::checked_rem`]
2116 #[rustc_const_stable(feature = "const_int_unchecked_rem", since = "1.52.0")]
2117 #[rustc_nounwind]
2118 pub fn unchecked_rem<T: Copy>(x: T, y: T) -> T;
2119
2120 /// Performs an unchecked left shift, resulting in undefined behavior when
2121 /// `y < 0` or `y >= N`, where N is the width of T in bits.
2122 ///
2123 /// Safe wrappers for this intrinsic are available on the integer
2124 /// primitives via the `checked_shl` method. For example,
2125 /// [`u32::checked_shl`]
2126 #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
2127 #[rustc_nounwind]
2128 pub fn unchecked_shl<T: Copy>(x: T, y: T) -> T;
2129 /// Performs an unchecked right shift, resulting in undefined behavior when
2130 /// `y < 0` or `y >= N`, where N is the width of T in bits.
2131 ///
2132 /// Safe wrappers for this intrinsic are available on the integer
2133 /// primitives via the `checked_shr` method. For example,
2134 /// [`u32::checked_shr`]
2135 #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
2136 #[rustc_nounwind]
2137 pub fn unchecked_shr<T: Copy>(x: T, y: T) -> T;
2138
2139 /// Returns the result of an unchecked addition, resulting in
2140 /// undefined behavior when `x + y > T::MAX` or `x + y < T::MIN`.
2141 ///
2142 /// This intrinsic does not have a stable counterpart.
2143 #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
2144 #[rustc_nounwind]
2145 pub fn unchecked_add<T: Copy>(x: T, y: T) -> T;
2146
2147 /// Returns the result of an unchecked subtraction, resulting in
2148 /// undefined behavior when `x - y > T::MAX` or `x - y < T::MIN`.
2149 ///
2150 /// This intrinsic does not have a stable counterpart.
2151 #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
2152 #[rustc_nounwind]
2153 pub fn unchecked_sub<T: Copy>(x: T, y: T) -> T;
2154
2155 /// Returns the result of an unchecked multiplication, resulting in
2156 /// undefined behavior when `x * y > T::MAX` or `x * y < T::MIN`.
2157 ///
2158 /// This intrinsic does not have a stable counterpart.
2159 #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
2160 #[rustc_nounwind]
2161 pub fn unchecked_mul<T: Copy>(x: T, y: T) -> T;
2162
2163 /// Performs rotate left.
2164 ///
2165 /// Note that, unlike most intrinsics, this is safe to call;
2166 /// it does not require an `unsafe` block.
2167 /// Therefore, implementations must not require the user to uphold
2168 /// any safety invariants.
2169 ///
2170 /// The stabilized versions of this intrinsic are available on the integer
2171 /// primitives via the `rotate_left` method. For example,
2172 /// [`u32::rotate_left`]
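///
/// For instance, using the stabilized method:
///
/// ```
/// // The most significant bit wraps around to the least significant position.
/// assert_eq!(0b1000_0001_u8.rotate_left(1), 0b0000_0011);
/// ```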
2173 #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
2174 #[rustc_safe_intrinsic]
2175 #[rustc_nounwind]
2176 pub fn rotate_left<T: Copy>(x: T, y: T) -> T;
2177
2178 /// Performs rotate right.
2179 ///
2180 /// Note that, unlike most intrinsics, this is safe to call;
2181 /// it does not require an `unsafe` block.
2182 /// Therefore, implementations must not require the user to uphold
2183 /// any safety invariants.
2184 ///
2185 /// The stabilized versions of this intrinsic are available on the integer
2186 /// primitives via the `rotate_right` method. For example,
2187 /// [`u32::rotate_right`]
2188 #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
2189 #[rustc_safe_intrinsic]
2190 #[rustc_nounwind]
2191 pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
2192
2193 /// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.
2194 ///
2195 /// Note that, unlike most intrinsics, this is safe to call;
2196 /// it does not require an `unsafe` block.
2197 /// Therefore, implementations must not require the user to uphold
2198 /// any safety invariants.
2199 ///
2200 /// The stabilized versions of this intrinsic are available on the integer
2201 /// primitives via the `wrapping_add` method. For example,
2202 /// [`u32::wrapping_add`]
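///
/// An illustration using the stabilized method:
///
/// ```
/// assert_eq!(200_u8.wrapping_add(100), 44); // 300 mod 2^8
/// assert_eq!(u8::MAX.wrapping_add(1), 0);
/// ```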
2203 #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
2204 #[rustc_safe_intrinsic]
2205 #[rustc_nounwind]
2206 pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
2207 /// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits.
2208 ///
2209 /// Note that, unlike most intrinsics, this is safe to call;
2210 /// it does not require an `unsafe` block.
2211 /// Therefore, implementations must not require the user to uphold
2212 /// any safety invariants.
2213 ///
2214 /// The stabilized versions of this intrinsic are available on the integer
2215 /// primitives via the `wrapping_sub` method. For example,
2216 /// [`u32::wrapping_sub`]
2217 #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
2218 #[rustc_safe_intrinsic]
2219 #[rustc_nounwind]
2220 pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
2221 /// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits.
2222 ///
2223 /// Note that, unlike most intrinsics, this is safe to call;
2224 /// it does not require an `unsafe` block.
2225 /// Therefore, implementations must not require the user to uphold
2226 /// any safety invariants.
2227 ///
2228 /// The stabilized versions of this intrinsic are available on the integer
2229 /// primitives via the `wrapping_mul` method. For example,
2230 /// [`u32::wrapping_mul`]
2231 #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
2232 #[rustc_safe_intrinsic]
2233 #[rustc_nounwind]
2234 pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
2235
2236 /// Computes `a + b`, saturating at numeric bounds.
2237 ///
2238 /// Note that, unlike most intrinsics, this is safe to call;
2239 /// it does not require an `unsafe` block.
2240 /// Therefore, implementations must not require the user to uphold
2241 /// any safety invariants.
2242 ///
2243 /// The stabilized versions of this intrinsic are available on the integer
2244 /// primitives via the `saturating_add` method. For example,
2245 /// [`u32::saturating_add`]
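    ///
    /// Illustrated with the stabilized wrapper rather than the intrinsic:
    ///
    /// ```
    /// // The result is clamped to the numeric maximum instead of wrapping.
    /// assert_eq!(u8::MAX.saturating_add(10), u8::MAX);
    /// ```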
    #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
    /// Computes `a - b`, saturating at numeric bounds.
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    ///
    /// The stabilized versions of this intrinsic are available on the integer
    /// primitives via the `saturating_sub` method. For example,
    /// [`u32::saturating_sub`]
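    ///
    /// Illustrated with the stabilized wrapper rather than the intrinsic:
    ///
    /// ```
    /// // The result is clamped to the numeric minimum instead of wrapping.
    /// assert_eq!(0u8.saturating_sub(10), 0);
    /// ```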
    #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;

    /// This is an implementation detail of [`crate::ptr::read`] and should
    /// not be used anywhere else. See its comments for why this exists.
    ///
    /// This intrinsic can *only* be called where the pointer is a local without
    /// projections (`read_via_copy(ptr)`, not `read_via_copy(*ptr)`) so that it
    /// trivially obeys runtime-MIR rules about derefs in operands.
    #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
    #[rustc_nounwind]
    pub fn read_via_copy<T>(ptr: *const T) -> T;

    /// This is an implementation detail of [`crate::ptr::write`] and should
    /// not be used anywhere else. See its comments for why this exists.
    ///
    /// This intrinsic can *only* be called where the pointer is a local without
    /// projections (`write_via_move(ptr, x)`, not `write_via_move(*ptr, x)`) so
    /// that it trivially obeys runtime-MIR rules about derefs in operands.
    #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
    #[rustc_nounwind]
    pub fn write_via_move<T>(ptr: *mut T, value: T);

    /// Returns the value of the discriminant for the variant in `v`;
    /// if `T` has no discriminant, returns `0`.
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    ///
    /// The stabilized version of this intrinsic is [`core::mem::discriminant`].
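    ///
    /// A brief illustration using the stabilized `core::mem::discriminant`
    /// rather than the intrinsic:
    ///
    /// ```
    /// use std::mem;
    ///
    /// // Discriminants compare equal for the same variant, regardless of payload.
    /// assert_eq!(mem::discriminant(&Some(1)), mem::discriminant(&Some(2)));
    /// assert_ne!(mem::discriminant(&Some(1)), mem::discriminant(&None::<i32>));
    /// ```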
    #[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;

    /// Returns the number of variants of the type `T` cast to a `usize`;
    /// if `T` has no variants, returns `0`. Uninhabited variants will be counted.
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    ///
    /// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
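    ///
    /// A sketch using the nightly-only wrapper (requires the `variant_count`
    /// feature), shown purely for illustration:
    ///
    /// ```
    /// #![feature(variant_count)]
    /// use std::mem;
    ///
    /// // `Option` has exactly two variants: `Some` and `None`.
    /// assert_eq!(mem::variant_count::<Option<u32>>(), 2);
    /// ```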
    #[rustc_const_unstable(feature = "variant_count", issue = "73662")]
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn variant_count<T>() -> usize;

    /// Rust's "try catch" construct which invokes the function pointer `try_fn`
    /// with the data pointer `data`.
    ///
    /// The third argument is a function called if a panic occurs. This function
    /// takes the data pointer and a pointer to the target-specific exception
    /// object that was caught. For more information see the compiler's
    /// source as well as std's catch implementation.
    ///
    /// `catch_fn` must not unwind.
    #[rustc_nounwind]
    pub fn r#try(try_fn: fn(*mut u8), data: *mut u8, catch_fn: fn(*mut u8, *mut u8)) -> i32;

    /// Emits a `!nontemporal` store according to LLVM (see their docs).
    /// Probably will never become stable.
    #[rustc_nounwind]
    pub fn nontemporal_store<T>(ptr: *mut T, val: T);

    /// See documentation of `<*const T>::offset_from` for details.
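    ///
    /// A quick sketch using the stabilized method rather than the raw intrinsic:
    ///
    /// ```
    /// let a = [0i32; 4];
    /// let base = a.as_ptr();
    /// // SAFETY: both pointers are derived from, and in bounds of, the same array.
    /// let distance = unsafe { base.add(3).offset_from(base) };
    /// assert_eq!(distance, 3);
    /// ```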
    #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
    #[rustc_nounwind]
    pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;

    /// See documentation of `<*const T>::sub_ptr` for details.
    #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
    #[rustc_nounwind]
    pub fn ptr_offset_from_unsigned<T>(ptr: *const T, base: *const T) -> usize;

    /// See documentation of `<*const T>::guaranteed_eq` for details.
    /// Returns `2` if the result is unknown.
    /// Returns `1` if the pointers are guaranteed equal.
    /// Returns `0` if the pointers are guaranteed unequal.
    ///
    /// Note that, unlike most intrinsics, this is safe to call;
    /// it does not require an `unsafe` block.
    /// Therefore, implementations must not require the user to uphold
    /// any safety invariants.
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;

    /// Allocates a block of memory at compile time.
    /// At runtime, just returns a null pointer.
    ///
    /// # Safety
    ///
    /// - The `align` argument must be a power of two.
    ///    - At compile time, a compile error occurs if this constraint is violated.
    ///    - At runtime, it is not checked.
    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
    #[rustc_nounwind]
    pub fn const_allocate(size: usize, align: usize) -> *mut u8;

    /// Deallocates memory that was allocated by `intrinsics::const_allocate` at compile time.
    /// At runtime, does nothing.
    ///
    /// # Safety
    ///
    /// - The `align` argument must be a power of two.
    ///    - At compile time, a compile error occurs if this constraint is violated.
    ///    - At runtime, it is not checked.
    /// - If `ptr` was created in another const, this intrinsic does not deallocate it.
    /// - If `ptr` points to a local variable, this intrinsic does not deallocate it.
    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
    #[rustc_nounwind]
    pub fn const_deallocate(ptr: *mut u8, size: usize, align: usize);

    /// Determines whether the raw bytes of the two values are equal.
    ///
    /// This is particularly handy for arrays, since it allows things like just
    /// comparing `i96`s instead of forcing `alloca`s for `[6 x i16]`.
    ///
    /// Above some backend-decided threshold this will emit calls to `memcmp`,
    /// like slice equality does, instead of generating a huge amount of inline code.
    ///
    /// Since this works by comparing the underlying bytes, the actual `T` is
    /// not particularly important. It will be used for its size and alignment,
    /// but any validity restrictions will be ignored, not enforced.
    ///
    /// # Safety
    ///
    /// It's UB to call this if any of the *bytes* in `*a` or `*b` are uninitialized or carry a
    /// pointer value.
    /// Note that this is a stricter criterion than just the *values* being
    /// fully-initialized: if `T` has padding, it's UB to call this intrinsic.
    ///
    /// (The implementation is allowed to branch on the results of comparisons,
    /// which is UB if any of their inputs are `undef`.)
    #[rustc_const_unstable(feature = "const_intrinsic_raw_eq", issue = "none")]
    #[rustc_nounwind]
    pub fn raw_eq<T>(a: &T, b: &T) -> bool;

    /// See documentation of [`std::hint::black_box`] for details.
    ///
    /// [`std::hint::black_box`]: crate::hint::black_box
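    ///
    /// A brief sketch using the stable [`std::hint::black_box`] wrapper rather
    /// than the intrinsic itself:
    ///
    /// ```
    /// use std::hint::black_box;
    ///
    /// // The value is passed through unchanged, but the optimizer is
    /// // encouraged to treat it as completely opaque.
    /// assert_eq!(black_box(21) * 2, 42);
    /// ```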
    #[rustc_const_unstable(feature = "const_black_box", issue = "none")]
    #[rustc_safe_intrinsic]
    #[rustc_nounwind]
    pub fn black_box<T>(dummy: T) -> T;

    /// `ptr` must point to a vtable.
    /// The intrinsic will return the size stored in that vtable.
    #[rustc_nounwind]
    pub fn vtable_size(ptr: *const ()) -> usize;

    /// `ptr` must point to a vtable.
    /// The intrinsic will return the alignment stored in that vtable.
    #[rustc_nounwind]
    pub fn vtable_align(ptr: *const ()) -> usize;

    /// Selects which function to call depending on the context.
    ///
    /// If this function is evaluated at compile-time, then a call to this
    /// intrinsic will be replaced with a call to `called_in_const`. It gets
    /// replaced with a call to `called_at_rt` otherwise.
    ///
    /// # Type Requirements
    ///
    /// The two functions must both be function items. They cannot be function
    /// pointers or closures. The first function must be a `const fn`.
    ///
    /// `arg` will be the tupled arguments that will be passed to either one of
    /// the two functions, therefore, both functions must accept the same type of
    /// arguments. Both functions must return `RET`.
    ///
    /// # Safety
    ///
    /// The two functions must be observably equivalent. Safe code in other
    /// crates may assume that calling a `const fn` at compile-time and at run-time
    /// produces the same result. A function that produces a different result when
    /// evaluated at run-time, or has any other observable side-effects, is
    /// *unsound*.
    ///
    /// Here is an example of how this could cause a problem:
    /// ```no_run
    /// #![feature(const_eval_select)]
    /// #![feature(core_intrinsics)]
    /// use std::hint::unreachable_unchecked;
    /// use std::intrinsics::const_eval_select;
    ///
    /// // Crate A
    /// pub const fn inconsistent() -> i32 {
    ///     fn runtime() -> i32 { 1 }
    ///     const fn compiletime() -> i32 { 2 }
    ///
    ///     unsafe {
    ///         // ⚠ This code violates the required equivalence of `compiletime`
    ///         // and `runtime`.
    ///         const_eval_select((), compiletime, runtime)
    ///     }
    /// }
    ///
    /// // Crate B
    /// const X: i32 = inconsistent();
    /// let x = inconsistent();
    /// if x != X { unsafe { unreachable_unchecked(); }}
    /// ```
    ///
    /// This code causes Undefined Behavior when being run, since the
    /// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
    /// which violates the principle that a `const fn` must behave the same at
    /// compile-time and at run-time. The unsafe code in crate B is fine.
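    ///
    /// For contrast, here is a sketch of a sound use, where the two functions
    /// compute the same value (nightly-only, shown purely for illustration):
    ///
    /// ```
    /// #![feature(const_eval_select)]
    /// #![feature(core_intrinsics)]
    /// use std::intrinsics::const_eval_select;
    ///
    /// const fn is_answer(x: u32) -> bool {
    ///     const fn compiletime(x: u32) -> bool { x == 42 }
    ///     fn runtime(x: u32) -> bool { x == 42 }
    ///
    ///     // SAFETY: both functions are observably equivalent.
    ///     unsafe { const_eval_select((x,), compiletime, runtime) }
    /// }
    ///
    /// const ANSWER: bool = is_answer(42);
    /// assert!(ANSWER);
    /// assert!(is_answer(42));
    /// ```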
    #[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
    pub fn const_eval_select<ARG: Tuple, F, G, RET>(
        arg: ARG,
        called_in_const: F,
        called_at_rt: G,
    ) -> RET
    where
        G: FnOnce<ARG, Output = RET>,
        F: FnOnce<ARG, Output = RET>;

    /// This method creates a pointer to any `Some` value. If the argument is
    /// `None`, an invalid within-bounds pointer (that is still acceptable for
    /// constructing an empty slice) is returned.
    #[rustc_nounwind]
    pub fn option_payload_ptr<T>(arg: *const Option<T>) -> *const T;
}

// Some functions are defined here because they accidentally got made
// available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>.
// (`transmute` also falls into this category, but it cannot be wrapped due to the
// check that `T` and `U` have the same size.)

/// Checks that the preconditions of an unsafe function are followed, if debug_assertions are on,
/// and only at runtime.
///
/// This macro should be called as
/// `assert_unsafe_precondition!("message", [Generics](name: Type) => Expression)`,
/// where the names specified will be moved into the macro as captured variables, and defines an item
/// to call `const_eval_select` on. The tokens inside the square brackets are used to denote generics
/// for the function declarations and can be omitted if there are no generics.
///
/// # Safety
///
/// Invoking this macro is only sound if the following code is already UB when the passed
/// expression evaluates to false.
///
/// This macro expands to a check at runtime if debug_assertions is set. It has no effect at
/// compile time, but the semantics of the contained `const_eval_select` must be the same at
/// runtime and at compile time. Thus if the expression evaluates to false, this macro produces
/// different behavior at compile time and at runtime, and invoking it is incorrect.
///
/// So in a sense it is UB if this macro is useful, but we expect callers of `unsafe fn` to make
/// the occasional mistake, and this check should help them figure things out.
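///
/// For example, a (hypothetical) unsafe wrapper might assert a non-null, aligned
/// pointer roughly like this; the message and the names used here are purely
/// illustrative:
///
/// ```ignore (internal macro, only usable inside `core` itself)
/// assert_unsafe_precondition!(
///     "my_read requires a non-null and aligned pointer",
///     [T](ptr: *const T) => !ptr.is_null() && ptr.is_aligned()
/// );
/// ```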
#[allow_internal_unstable(const_eval_select)] // permit this to be called in stably-const fn
macro_rules! assert_unsafe_precondition {
    ($name:expr, $([$($tt:tt)*])?($($i:ident:$ty:ty),*$(,)?) => $e:expr) => {
        if cfg!(debug_assertions) {
            // allow non_snake_case to allow capturing const generics
            #[allow(non_snake_case)]
            #[inline(always)]
            fn runtime$(<$($tt)*>)?($($i:$ty),*) {
                if !$e {
                    // don't unwind to reduce impact on code size
                    ::core::panicking::panic_nounwind(
                        concat!("unsafe precondition(s) violated: ", $name)
                    );
                }
            }
            #[allow(non_snake_case)]
            #[inline]
            const fn comptime$(<$($tt)*>)?($(_:$ty),*) {}

            ::core::intrinsics::const_eval_select(($($i,)*), comptime, runtime);
        }
    };
}
pub(crate) use assert_unsafe_precondition;

/// Checks whether `ptr` is non-null and properly aligned with respect to
/// `align_of::<T>()`.
pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
    !ptr.is_null() && ptr.is_aligned()
}

/// Checks whether an allocation of `len` instances of `T` does not exceed
/// the maximum allowed allocation size (`isize::MAX` bytes).
pub(crate) fn is_valid_allocation_size<T>(len: usize) -> bool {
    let max_len = const {
        let size = crate::mem::size_of::<T>();
        if size == 0 { usize::MAX } else { isize::MAX as usize / size }
    };
    len <= max_len
}

/// Checks whether the regions of memory starting at `src` and `dst` of size
/// `count * size_of::<T>()` do *not* overlap.
pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
    let src_usize = src.addr();
    let dst_usize = dst.addr();
    let size = mem::size_of::<T>()
        .checked_mul(count)
        .expect("is_nonoverlapping: `size_of::<T>() * count` overflows a usize");
    let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize };
    // If the absolute distance between the ptrs is at least as big as the size of the buffer,
    // they do not overlap.
    diff >= size
}

/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
/// and destination must *not* overlap.
///
/// For regions of memory which might overlap, use [`copy`] instead.
///
/// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`], but
/// with the argument order swapped.
///
/// The copy is "untyped" in the sense that data may be uninitialized or otherwise violate the
/// requirements of `T`. The initialization state is preserved exactly.
///
/// [`memcpy`]: https://en.cppreference.com/w/c/string/byte/memcpy
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
///
/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
///
/// * Both `src` and `dst` must be properly aligned.
///
/// * The region of memory beginning at `src` with a size of `count *
///   size_of::<T>()` bytes must *not* overlap with the region of memory
///   beginning at `dst` with the same size.
///
/// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
/// in the region beginning at `*src` and the region beginning at `*dst` can
/// [violate memory safety][read-ownership].
///
/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
/// `0`, the pointers must be non-null and properly aligned.
///
/// [`read`]: crate::ptr::read
/// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
/// [valid]: crate::ptr#safety
///
/// # Examples
///
/// Manually implement [`Vec::append`]:
///
/// ```
/// use std::ptr;
///
/// /// Moves all the elements of `src` into `dst`, leaving `src` empty.
/// fn append<T>(dst: &mut Vec<T>, src: &mut Vec<T>) {
///     let src_len = src.len();
///     let dst_len = dst.len();
///
///     // Ensure that `dst` has enough capacity to hold all of `src`.
///     dst.reserve(src_len);
///
///     unsafe {
///         // The call to add is always safe because `Vec` will never
///         // allocate more than `isize::MAX` bytes.
///         let dst_ptr = dst.as_mut_ptr().add(dst_len);
///         let src_ptr = src.as_ptr();
///
///         // Truncate `src` without dropping its contents. We do this first,
///         // to avoid problems in case something further down panics.
///         src.set_len(0);
///
///         // The two regions cannot overlap because mutable references do
///         // not alias, and two different vectors cannot own the same
///         // memory.
///         ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len);
///
///         // Notify `dst` that it now holds the contents of `src`.
///         dst.set_len(dst_len + src_len);
///     }
/// }
///
/// let mut a = vec!['r'];
/// let mut b = vec!['u', 's', 't'];
///
/// append(&mut a, &mut b);
///
/// assert_eq!(a, &['r', 'u', 's', 't']);
/// assert!(b.is_empty());
/// ```
///
/// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append
#[doc(alias = "memcpy")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
    extern "rust-intrinsic" {
        #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
        #[rustc_nounwind]
        pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
    }

    // SAFETY: the safety contract for `copy_nonoverlapping` must be
    // upheld by the caller.
    unsafe {
        assert_unsafe_precondition!(
            "ptr::copy_nonoverlapping requires that both pointer arguments are aligned and non-null \
            and the specified memory ranges do not overlap",
            [T](src: *const T, dst: *mut T, count: usize) =>
            is_aligned_and_not_null(src)
                && is_aligned_and_not_null(dst)
                && is_nonoverlapping(src, dst, count)
        );
        copy_nonoverlapping(src, dst, count)
    }
}

/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
/// and destination may overlap.
///
/// If the source and destination will *never* overlap,
/// [`copy_nonoverlapping`] can be used instead.
///
/// `copy` is semantically equivalent to C's [`memmove`], but with the argument
/// order swapped. Copying takes place as if the bytes were copied from `src`
/// to a temporary array and then copied from the array to `dst`.
///
/// The copy is "untyped" in the sense that data may be uninitialized or otherwise violate the
/// requirements of `T`. The initialization state is preserved exactly.
///
/// [`memmove`]: https://en.cppreference.com/w/c/string/byte/memmove
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
///
/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
///
/// * Both `src` and `dst` must be properly aligned.
///
/// Like [`read`], `copy` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the values
/// in the region beginning at `*src` and the region beginning at `*dst` can
/// [violate memory safety][read-ownership].
///
/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
/// `0`, the pointers must be non-null and properly aligned.
///
/// [`read`]: crate::ptr::read
/// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
/// [valid]: crate::ptr#safety
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// use std::ptr;
///
/// /// # Safety
/// ///
/// /// * `ptr` must be correctly aligned for its type and non-zero.
/// /// * `ptr` must be valid for reads of `elts` contiguous elements of type `T`.
/// /// * Those elements must not be used after calling this function unless `T: Copy`.
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///
///     // SAFETY: Our precondition ensures the source is aligned and valid,
///     // and `Vec::with_capacity` ensures that we have usable space to write them.
///     ptr::copy(ptr, dst.as_mut_ptr(), elts);
///
///     // SAFETY: We created it with this much capacity earlier,
///     // and the previous `copy` has initialized these elements.
///     dst.set_len(elts);
///     dst
/// }
/// ```
#[doc(alias = "memmove")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_allowed_through_unstable_modules]
#[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
    extern "rust-intrinsic" {
        #[rustc_const_stable(feature = "const_intrinsic_copy", since = "1.63.0")]
        #[rustc_nounwind]
        fn copy<T>(src: *const T, dst: *mut T, count: usize);
    }

    // SAFETY: the safety contract for `copy` must be upheld by the caller.
    unsafe {
        assert_unsafe_precondition!(
            "ptr::copy requires that both pointer arguments are aligned and non-null",
            [T](src: *const T, dst: *mut T) =>
            is_aligned_and_not_null(src) && is_aligned_and_not_null(dst)
        );
        copy(src, dst, count)
    }
}

/// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
/// `val`.
///
/// `write_bytes` is similar to C's [`memset`], but sets `count *
/// size_of::<T>()` bytes to `val`.
///
/// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
/// `0`, the pointer must be non-null and properly aligned.
///
/// Additionally, note that changing `*dst` in this way can easily lead to undefined behavior (UB)
/// later if the written bytes are not a valid representation of some `T`. For instance, the
/// following is an **incorrect** use of this function:
///
/// ```rust,no_run
/// unsafe {
///     let mut value: u8 = 0;
///     let ptr: *mut bool = &mut value as *mut u8 as *mut bool;
///     let _bool = ptr.read(); // This is fine, `ptr` points to a valid `bool`.
///     ptr.write_bytes(42u8, 1); // This function itself does not cause UB...
///     let _bool = ptr.read(); // ...but it makes this operation UB! ⚠️
/// }
/// ```
///
/// [valid]: crate::ptr#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ptr;
///
/// let mut vec = vec![0u32; 4];
/// unsafe {
///     let vec_ptr = vec.as_mut_ptr();
///     ptr::write_bytes(vec_ptr, 0xfe, 2);
/// }
/// assert_eq!(vec, [0xfefefefe, 0xfefefefe, 0, 0]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_allowed_through_unstable_modules]
#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
    extern "rust-intrinsic" {
        #[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
        #[rustc_nounwind]
        fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
    }

    // SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
    unsafe {
        assert_unsafe_precondition!(
            "ptr::write_bytes requires that the destination pointer is aligned and non-null",
            [T](dst: *mut T) => is_aligned_and_not_null(dst)
        );
        write_bytes(dst, val, count)
    }
}