1 use core::iter::{FromIterator, Iterator};
2 use core::mem::{self, ManuallyDrop, MaybeUninit};
3 use core::ops::{Deref, DerefMut};
4 use core::ptr::{self, NonNull};
5 use core::{cmp, fmt, hash, isize, slice, usize};
6 
7 use alloc::{
8     borrow::{Borrow, BorrowMut},
9     boxed::Box,
10     string::String,
11     vec,
12     vec::Vec,
13 };
14 
15 use crate::buf::{IntoIter, UninitSlice};
16 use crate::bytes::Vtable;
17 #[allow(unused)]
18 use crate::loom::sync::atomic::AtomicMut;
19 use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
20 use crate::{Buf, BufMut, Bytes};
21 
22 /// A unique reference to a contiguous slice of memory.
23 ///
24 /// `BytesMut` represents a unique view into a potentially shared memory region.
25 /// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
26 /// mutate the memory.
27 ///
28 /// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
29 /// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
30 /// same `buf` overlaps with its slice. That guarantee means that a write lock
31 /// is not required.
32 ///
33 /// # Growth
34 ///
35 /// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
36 /// necessary. However, explicitly reserving the required space up-front before
37 /// a series of inserts will be more efficient.
38 ///
39 /// # Examples
40 ///
41 /// ```
42 /// use bytes::{BytesMut, BufMut};
43 ///
44 /// let mut buf = BytesMut::with_capacity(64);
45 ///
46 /// buf.put_u8(b'h');
47 /// buf.put_u8(b'e');
48 /// buf.put(&b"llo"[..]);
49 ///
50 /// assert_eq!(&buf[..], b"hello");
51 ///
52 /// // Freeze the buffer so that it can be shared
53 /// let a = buf.freeze();
54 ///
55 /// // This does not allocate, instead `b` points to the same memory.
56 /// let b = a.clone();
57 ///
58 /// assert_eq!(&a[..], b"hello");
59 /// assert_eq!(&b[..], b"hello");
60 /// ```
61 pub struct BytesMut {
62     ptr: NonNull<u8>,
63     len: usize,
64     cap: usize,
65     data: *mut Shared,
66 }
67 
68 // Thread-safe reference-counted container for the shared storage. This is
69 // mostly the same as `alloc::sync::Arc` but without the weak counter. The ref
70 // counting fns are based on the ones found in `std`.
71 //
72 // The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
73 // up making the overall code simpler and easier to reason about. This is due to
74 // some of the logic around setting `Inner::arc` and other ways the `arc` field
75 // is used. Using `Arc` ended up requiring a number of funky transmutes and
76 // other shenanigans to make it work.
77 struct Shared {
78     vec: Vec<u8>,
79     original_capacity_repr: usize,
80     ref_count: AtomicUsize,
81 }
82 
83 // Buffer storage strategy flags.
84 const KIND_ARC: usize = 0b0;
85 const KIND_VEC: usize = 0b1;
86 const KIND_MASK: usize = 0b1;
87 
88 // The max original capacity value. Any `Bytes` allocated with a greater initial
89 // capacity will default to this.
90 const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
91 // The original capacity algorithm will not take effect unless the originally
92 // allocated capacity was at least 1kb in size.
93 const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
94 // The original capacity is stored in powers of 2 starting at 1kb to a max of
95 // 64kb. Representing it as such requires only 3 bits of storage.
96 const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
97 const ORIGINAL_CAPACITY_OFFSET: usize = 2;
98 
99 // When the storage is in the `Vec` representation, the pointer can be advanced
100 // at most this value. This is because the amount of storage available to
101 // track the offset is `usize` minus the number of KIND bits and the number
102 // of ORIGINAL_CAPACITY bits.
103 const VEC_POS_OFFSET: usize = 5;
104 const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
105 const NOT_VEC_POS_MASK: usize = 0b11111;
106 
107 #[cfg(target_pointer_width = "64")]
108 const PTR_WIDTH: usize = 64;
109 #[cfg(target_pointer_width = "32")]
110 const PTR_WIDTH: usize = 32;
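// A rough sketch, derived from the constants above, of how the `data` word is
// packed while a `BytesMut` is in the `KIND_VEC` representation (`from_vec`,
// `get_vec_pos` and `set_vec_pos` below read and write these fields):
//
//     bits [PTR_WIDTH-1 .. 5]  vec position (offset into the original `Vec`)
//     bits [4 .. 2]            original capacity repr (ORIGINAL_CAPACITY_MASK)
//     bit  [1]                 unused
//     bit  [0]                 kind flag (KIND_VEC = 1, KIND_ARC = 0)
//
// For example, `(repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC` with a repr of
// 3 yields 0b01101; advancing the view by 7 bytes then stores the offset in
// the upper bits via `set_vec_pos`, yielding 0b11101101.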
111 
112 /*
113  *
114  * ===== BytesMut =====
115  *
116  */
117 
118 impl BytesMut {
119     /// Creates a new `BytesMut` with the specified capacity.
120     ///
121     /// The returned `BytesMut` will be able to hold at least `capacity` bytes
122     /// without reallocating.
123     ///
124     /// It is important to note that this function does not specify the length
125     /// of the returned `BytesMut`, but only the capacity.
126     ///
127     /// # Examples
128     ///
129     /// ```
130     /// use bytes::{BytesMut, BufMut};
131     ///
132     /// let mut bytes = BytesMut::with_capacity(64);
133     ///
134     /// // `bytes` contains no data, even though there is capacity
135     /// assert_eq!(bytes.len(), 0);
136     ///
137     /// bytes.put(&b"hello world"[..]);
138     ///
139     /// assert_eq!(&bytes[..], b"hello world");
140     /// ```
141     #[inline]
142     pub fn with_capacity(capacity: usize) -> BytesMut {
143         BytesMut::from_vec(Vec::with_capacity(capacity))
144     }
145 
146     /// Creates a new `BytesMut` with default capacity.
147     ///
148     /// The resulting object has a length of 0 and an unspecified capacity.
149     /// This function does not allocate.
150     ///
151     /// # Examples
152     ///
153     /// ```
154     /// use bytes::{BytesMut, BufMut};
155     ///
156     /// let mut bytes = BytesMut::new();
157     ///
158     /// assert_eq!(0, bytes.len());
159     ///
160     /// bytes.reserve(2);
161     /// bytes.put_slice(b"xy");
162     ///
163     /// assert_eq!(&b"xy"[..], &bytes[..]);
164     /// ```
165     #[inline]
166     pub fn new() -> BytesMut {
167         BytesMut::with_capacity(0)
168     }
169 
170     /// Returns the number of bytes contained in this `BytesMut`.
171     ///
172     /// # Examples
173     ///
174     /// ```
175     /// use bytes::BytesMut;
176     ///
177     /// let b = BytesMut::from(&b"hello"[..]);
178     /// assert_eq!(b.len(), 5);
179     /// ```
180     #[inline]
181     pub fn len(&self) -> usize {
182         self.len
183     }
184 
185     /// Returns true if the `BytesMut` has a length of 0.
186     ///
187     /// # Examples
188     ///
189     /// ```
190     /// use bytes::BytesMut;
191     ///
192     /// let b = BytesMut::with_capacity(64);
193     /// assert!(b.is_empty());
194     /// ```
195     #[inline]
196     pub fn is_empty(&self) -> bool {
197         self.len == 0
198     }
199 
200     /// Returns the number of bytes the `BytesMut` can hold without reallocating.
201     ///
202     /// # Examples
203     ///
204     /// ```
205     /// use bytes::BytesMut;
206     ///
207     /// let b = BytesMut::with_capacity(64);
208     /// assert_eq!(b.capacity(), 64);
209     /// ```
210     #[inline]
211     pub fn capacity(&self) -> usize {
212         self.cap
213     }
214 
215     /// Converts `self` into an immutable `Bytes`.
216     ///
217     /// The conversion is zero cost and is used to indicate that the slice
218     /// referenced by the handle will no longer be mutated. Once the conversion
219     /// is done, the handle can be cloned and shared across threads.
220     ///
221     /// # Examples
222     ///
223     /// ```
224     /// use bytes::{BytesMut, BufMut};
225     /// use std::thread;
226     ///
227     /// let mut b = BytesMut::with_capacity(64);
228     /// b.put(&b"hello world"[..]);
229     /// let b1 = b.freeze();
230     /// let b2 = b1.clone();
231     ///
232     /// let th = thread::spawn(move || {
233     ///     assert_eq!(&b1[..], b"hello world");
234     /// });
235     ///
236     /// assert_eq!(&b2[..], b"hello world");
237     /// th.join().unwrap();
238     /// ```
239     #[inline]
240     pub fn freeze(mut self) -> Bytes {
241         if self.kind() == KIND_VEC {
242             // Just re-use `Bytes` internal Vec vtable
243             unsafe {
244                 let (off, _) = self.get_vec_pos();
245                 let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
246                 mem::forget(self);
247                 let mut b: Bytes = vec.into();
248                 b.advance(off);
249                 b
250             }
251         } else {
252             debug_assert_eq!(self.kind(), KIND_ARC);
253 
254             let ptr = self.ptr.as_ptr();
255             let len = self.len;
256             let data = AtomicPtr::new(self.data.cast());
257             mem::forget(self);
258             unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
259         }
260     }
261 
262     /// Creates a new `BytesMut` with `len` bytes, each initialized to zero.
263     ///
264     /// # Examples
265     ///
266     /// ```
267     /// use bytes::BytesMut;
268     ///
269     /// let zeros = BytesMut::zeroed(42);
270     ///
271     /// assert_eq!(zeros.len(), 42);
272     /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
273     /// ```
274     pub fn zeroed(len: usize) -> BytesMut {
275         BytesMut::from_vec(vec![0; len])
276     }
277 
278     /// Splits the bytes into two at the given index.
279     ///
280     /// Afterwards `self` contains elements `[0, at)`, and the returned
281     /// `BytesMut` contains elements `[at, capacity)`.
282     ///
283     /// This is an `O(1)` operation that just increases the reference count
284     /// and sets a few indices.
285     ///
286     /// # Examples
287     ///
288     /// ```
289     /// use bytes::BytesMut;
290     ///
291     /// let mut a = BytesMut::from(&b"hello world"[..]);
292     /// let mut b = a.split_off(5);
293     ///
294     /// a[0] = b'j';
295     /// b[0] = b'!';
296     ///
297     /// assert_eq!(&a[..], b"jello");
298     /// assert_eq!(&b[..], b"!world");
299     /// ```
300     ///
301     /// # Panics
302     ///
303     /// Panics if `at > capacity`.
304     #[must_use = "consider BytesMut::truncate if you don't need the other half"]
305     pub fn split_off(&mut self, at: usize) -> BytesMut {
306         assert!(
307             at <= self.capacity(),
308             "split_off out of bounds: {:?} <= {:?}",
309             at,
310             self.capacity(),
311         );
312         unsafe {
313             let mut other = self.shallow_clone();
314             other.set_start(at);
315             self.set_end(at);
316             other
317         }
318     }
319 
320     /// Removes the bytes from the current view, returning them in a new
321     /// `BytesMut` handle.
322     ///
323     /// Afterwards, `self` will be empty, but will retain any additional
324     /// capacity that it had before the operation. This is identical to
325     /// `self.split_to(self.len())`.
326     ///
327     /// This is an `O(1)` operation that just increases the reference count and
328     /// sets a few indices.
329     ///
330     /// # Examples
331     ///
332     /// ```
333     /// use bytes::{BytesMut, BufMut};
334     ///
335     /// let mut buf = BytesMut::with_capacity(1024);
336     /// buf.put(&b"hello world"[..]);
337     ///
338     /// let other = buf.split();
339     ///
340     /// assert!(buf.is_empty());
341     /// assert_eq!(1013, buf.capacity());
342     ///
343     /// assert_eq!(other, b"hello world"[..]);
344     /// ```
345     #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
346     pub fn split(&mut self) -> BytesMut {
347         let len = self.len();
348         self.split_to(len)
349     }
350 
351     /// Splits the buffer into two at the given index.
352     ///
353     /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
354     /// contains elements `[0, at)`.
355     ///
356     /// This is an `O(1)` operation that just increases the reference count and
357     /// sets a few indices.
358     ///
359     /// # Examples
360     ///
361     /// ```
362     /// use bytes::BytesMut;
363     ///
364     /// let mut a = BytesMut::from(&b"hello world"[..]);
365     /// let mut b = a.split_to(5);
366     ///
367     /// a[0] = b'!';
368     /// b[0] = b'j';
369     ///
370     /// assert_eq!(&a[..], b"!world");
371     /// assert_eq!(&b[..], b"jello");
372     /// ```
373     ///
374     /// # Panics
375     ///
376     /// Panics if `at > len`.
377     #[must_use = "consider BytesMut::advance if you don't need the other half"]
378     pub fn split_to(&mut self, at: usize) -> BytesMut {
379         assert!(
380             at <= self.len(),
381             "split_to out of bounds: {:?} <= {:?}",
382             at,
383             self.len(),
384         );
385 
386         unsafe {
387             let mut other = self.shallow_clone();
388             other.set_end(at);
389             self.set_start(at);
390             other
391         }
392     }
393 
394     /// Shortens the buffer, keeping the first `len` bytes and dropping the
395     /// rest.
396     ///
397     /// If `len` is greater than the buffer's current length, this has no
398     /// effect.
399     ///
400     /// Existing underlying capacity is preserved.
401     ///
402     /// The [`split_off`] method can emulate `truncate`, but this causes the
403     /// excess bytes to be returned instead of dropped.
404     ///
405     /// # Examples
406     ///
407     /// ```
408     /// use bytes::BytesMut;
409     ///
410     /// let mut buf = BytesMut::from(&b"hello world"[..]);
411     /// buf.truncate(5);
412     /// assert_eq!(buf, b"hello"[..]);
413     /// ```
414     ///
415     /// [`split_off`]: #method.split_off
416     pub fn truncate(&mut self, len: usize) {
417         if len <= self.len() {
418             unsafe {
419                 self.set_len(len);
420             }
421         }
422     }
423 
424     /// Clears the buffer, removing all data. Existing capacity is preserved.
425     ///
426     /// # Examples
427     ///
428     /// ```
429     /// use bytes::BytesMut;
430     ///
431     /// let mut buf = BytesMut::from(&b"hello world"[..]);
432     /// buf.clear();
433     /// assert!(buf.is_empty());
434     /// ```
435     pub fn clear(&mut self) {
436         self.truncate(0);
437     }
438 
439     /// Resizes the buffer so that `len` is equal to `new_len`.
440     ///
441     /// If `new_len` is greater than `len`, the buffer is extended by the
442     /// difference with each additional byte set to `value`. If `new_len` is
443     /// less than `len`, the buffer is simply truncated.
444     ///
445     /// # Examples
446     ///
447     /// ```
448     /// use bytes::BytesMut;
449     ///
450     /// let mut buf = BytesMut::new();
451     ///
452     /// buf.resize(3, 0x1);
453     /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
454     ///
455     /// buf.resize(2, 0x2);
456     /// assert_eq!(&buf[..], &[0x1, 0x1]);
457     ///
458     /// buf.resize(4, 0x3);
459     /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
460     /// ```
461     pub fn resize(&mut self, new_len: usize, value: u8) {
462         let len = self.len();
463         if new_len > len {
464             let additional = new_len - len;
465             self.reserve(additional);
466             unsafe {
467                 let dst = self.chunk_mut().as_mut_ptr();
468                 ptr::write_bytes(dst, value, additional);
469                 self.set_len(new_len);
470             }
471         } else {
472             self.truncate(new_len);
473         }
474     }
475 
476     /// Sets the length of the buffer.
477     ///
478     /// This will explicitly set the size of the buffer without actually
479     /// modifying the data, so it is up to the caller to ensure that the data
480     /// has been initialized.
481     ///
482     /// # Examples
483     ///
484     /// ```
485     /// use bytes::BytesMut;
486     ///
487     /// let mut b = BytesMut::from(&b"hello world"[..]);
488     ///
489     /// unsafe {
490     ///     b.set_len(5);
491     /// }
492     ///
493     /// assert_eq!(&b[..], b"hello");
494     ///
495     /// unsafe {
496     ///     b.set_len(11);
497     /// }
498     ///
499     /// assert_eq!(&b[..], b"hello world");
500     /// ```
501     #[inline]
502     pub unsafe fn set_len(&mut self, len: usize) {
503         debug_assert!(len <= self.cap, "set_len out of bounds");
504         self.len = len;
505     }
506 
507     /// Reserves capacity for at least `additional` more bytes to be inserted
508     /// into the given `BytesMut`.
509     ///
510     /// More than `additional` bytes may be reserved in order to avoid frequent
511     /// reallocations. A call to `reserve` may result in an allocation.
512     ///
513     /// Before allocating new buffer space, the function will attempt to reclaim
514     /// space in the existing buffer. If the current handle references a view
515     /// into a larger original buffer, and all other handles referencing part
516     /// of the same original buffer have been dropped, then the current view
517     /// can be copied/shifted to the front of the buffer and the handle can take
518     /// ownership of the full buffer, provided that the full buffer is large
519     /// enough to fit the requested additional capacity.
520     ///
521     /// This optimization will only happen if shifting the data from the current
522     /// view to the front of the buffer is not too expensive in terms of the
523     /// (amortized) time required. The precise condition is subject to change;
524     /// as of now, the length of the data being shifted needs to be at least as
525     /// large as the distance that it's shifted by. If the current view is empty
526     /// and the original buffer is large enough to fit the requested additional
527     /// capacity, then reallocations will never happen.
528     ///
529     /// # Examples
530     ///
531     /// In the following example, a new buffer is allocated.
532     ///
533     /// ```
534     /// use bytes::BytesMut;
535     ///
536     /// let mut buf = BytesMut::from(&b"hello"[..]);
537     /// buf.reserve(64);
538     /// assert!(buf.capacity() >= 69);
539     /// ```
540     ///
541     /// In the following example, the existing buffer is reclaimed.
542     ///
543     /// ```
544     /// use bytes::{BytesMut, BufMut};
545     ///
546     /// let mut buf = BytesMut::with_capacity(128);
547     /// buf.put(&[0; 64][..]);
548     ///
549     /// let ptr = buf.as_ptr();
550     /// let other = buf.split();
551     ///
552     /// assert!(buf.is_empty());
553     /// assert_eq!(buf.capacity(), 64);
554     ///
555     /// drop(other);
556     /// buf.reserve(128);
557     ///
558     /// assert_eq!(buf.capacity(), 128);
559     /// assert_eq!(buf.as_ptr(), ptr);
560     /// ```
561     ///
562     /// # Panics
563     ///
564     /// Panics if the new capacity overflows `usize`.
565     #[inline]
566     pub fn reserve(&mut self, additional: usize) {
567         let len = self.len();
568         let rem = self.capacity() - len;
569 
570         if additional <= rem {
571             // The handle can already store at least `additional` more bytes, so
572             // there is no further work needed to be done.
573             return;
574         }
575 
576         self.reserve_inner(additional);
577     }
578 
579     // In a separate function to allow the short-circuits in `reserve` to
580     // be inline-able. Significantly helps performance.
581     fn reserve_inner(&mut self, additional: usize) {
582         let len = self.len();
583         let kind = self.kind();
584 
585         if kind == KIND_VEC {
586             // If there's enough free space before the start of the buffer, then
587             // just copy the data backwards and reuse the already-allocated
588             // space.
589             //
590             // Otherwise, since backed by a vector, use `Vec::reserve`
591             //
592             // We need to make sure that this optimization does not kill the
593             // amortized runtimes of BytesMut's operations.
594             unsafe {
595                 let (off, prev) = self.get_vec_pos();
596 
597                 // Only reuse space if we can satisfy the requested additional space.
598                 //
599                 // Also check if the value of `off` suggests that enough bytes
600                 // have been read to account for the overhead of shifting all
601                 // the data (in an amortized analysis).
602                 // Hence the condition `off >= self.len()`.
603                 //
604                 // This condition also already implies that the buffer is going
605                 // to be (at least) half-empty in the end; so we do not break
606                 // the (amortized) runtime with future resizes of the underlying
607                 // `Vec`.
608                 //
609                 // [For more details check issue #524, and PR #525.]
610                 if self.capacity() - self.len() + off >= additional && off >= self.len() {
611                     // There's enough space, and it's not too much overhead:
612                     // reuse the space!
613                     //
614                     // Just move the pointer back to the start after copying
615                     // data back.
616                     let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
617                     // Since `off >= self.len()`, the two regions don't overlap.
618                     ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
619                     self.ptr = vptr(base_ptr);
620                     self.set_vec_pos(0, prev);
621 
622                     // Length stays constant, but since we moved backwards we
623                     // can gain capacity back.
624                     self.cap += off;
625                 } else {
626                     // Not enough space, or reusing might be too much overhead:
627                     // allocate more space!
628                     let mut v =
629                         ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
630                     v.reserve(additional);
631 
632                     // Update the info
633                     self.ptr = vptr(v.as_mut_ptr().add(off));
634                     self.len = v.len() - off;
635                     self.cap = v.capacity() - off;
636                 }
637 
638                 return;
639             }
640         }
641 
642         debug_assert_eq!(kind, KIND_ARC);
643         let shared: *mut Shared = self.data;
644 
645         // Reserving involves abandoning the currently shared buffer and
646         // allocating a new vector with the requested capacity.
647         //
648         // Compute the new capacity
649         let mut new_cap = len.checked_add(additional).expect("overflow");
650 
651         let original_capacity;
652         let original_capacity_repr;
653 
654         unsafe {
655             original_capacity_repr = (*shared).original_capacity_repr;
656             original_capacity = original_capacity_from_repr(original_capacity_repr);
657 
658             // First, try to reclaim the buffer. This is possible if the current
659             // handle is the only outstanding handle pointing to the buffer.
660             if (*shared).is_unique() {
661                 // This is the only handle to the buffer. It can be reclaimed.
662                 // However, before doing the work of copying data, check to make
663                 // sure that the vector has enough capacity.
664                 let v = &mut (*shared).vec;
665 
666                 let v_capacity = v.capacity();
667                 let ptr = v.as_mut_ptr();
668 
669                 let offset = offset_from(self.ptr.as_ptr(), ptr);
670 
671                 // Compare the condition in the `kind == KIND_VEC` case above
672                 // for more details.
673                 if v_capacity >= new_cap + offset {
674                     self.cap = new_cap;
675                     // no copy is necessary
676                 } else if v_capacity >= new_cap && offset >= len {
677                     // The capacity is sufficient, and copying is not too much
678                     // overhead: reclaim the buffer!
679 
680                     // `offset >= len` means: no overlap
681                     ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
682 
683                     self.ptr = vptr(ptr);
684                     self.cap = v.capacity();
685                 } else {
686                     // calculate offset
687                     let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
688 
689                     // new_cap is calculated in terms of `BytesMut`, not the underlying
690                     // `Vec`, so it does not take the offset into account.
691                     //
692                     // Thus we have to manually add it here.
693                     new_cap = new_cap.checked_add(off).expect("overflow");
694 
695                     // The vector capacity is not sufficient. The reserve request is
696                     // asking for more than the initial buffer capacity. Allocate more
697                     // than requested if `new_cap` is not much bigger than the current
698                     // capacity.
699                     //
700                     // In some situations (e.g. when `reserve_exact` has
701                     // been used) the buffer capacity could be below
702                     // `original_capacity`, so do a check.
703                     let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
704 
705                     new_cap = cmp::max(double, new_cap);
706 
707                     // No space - allocate more
708                     //
709                     // The length field of `Shared::vec` is not used by the `BytesMut`;
710                     // instead we use the `len` field in the `BytesMut` itself. However,
711                     // when calling `reserve`, it doesn't guarantee that data stored in
712                     // the unused capacity of the vector is copied over to the new
713                     // allocation, so we need to ensure that we don't have any data we
714                     // care about in the unused capacity before calling `reserve`.
715                     debug_assert!(off + len <= v.capacity());
716                     v.set_len(off + len);
717                     v.reserve(new_cap - v.len());
718 
719                     // Update the info
720                     self.ptr = vptr(v.as_mut_ptr().add(off));
721                     self.cap = v.capacity() - off;
722                 }
723 
724                 return;
725             } else {
726                 new_cap = cmp::max(new_cap, original_capacity);
727             }
728         }
729 
730         // Create a new vector to store the data
731         let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
732 
733         // Copy the bytes
734         v.extend_from_slice(self.as_ref());
735 
736         // Release the shared handle. This must be done *after* the bytes are
737         // copied.
738         unsafe { release_shared(shared) };
739 
740         // Update self
741         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
742         self.data = invalid_ptr(data);
743         self.ptr = vptr(v.as_mut_ptr());
744         self.len = v.len();
745         self.cap = v.capacity();
746     }
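// A worked example of the `KIND_VEC` reclaim condition in `reserve_inner`
// above, using hypothetical numbers: start from a buffer created with
// capacity 64, write 50 bytes, then `advance(40)`, leaving `off = 40`,
// `len = 10`, `capacity() = 24`. A later `reserve(50)` then checks:
//
//     capacity() - len() + off  =  24 - 10 + 40  =  54  >=  50    (fits)
//     off  =  40  >=  len  =  10                                  (cheap shift)
//
// Both hold, so the 10 live bytes are copied back to the start of the
// allocation and the capacity grows back to 64 without reallocating. If `off`
// were smaller than `len`, the second check would fail and the code would
// fall back to `Vec::reserve` instead.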
747 
748     /// Appends the given bytes to this `BytesMut`.
749     ///
750     /// If this `BytesMut` object does not have enough capacity, it is resized
751     /// first.
752     ///
753     /// # Examples
754     ///
755     /// ```
756     /// use bytes::BytesMut;
757     ///
758     /// let mut buf = BytesMut::with_capacity(0);
759     /// buf.extend_from_slice(b"aaabbb");
760     /// buf.extend_from_slice(b"cccddd");
761     ///
762     /// assert_eq!(b"aaabbbcccddd", &buf[..]);
763     /// ```
764     pub fn extend_from_slice(&mut self, extend: &[u8]) {
765         let cnt = extend.len();
766         self.reserve(cnt);
767 
768         unsafe {
769             let dst = self.spare_capacity_mut();
770             // Reserved above
771             debug_assert!(dst.len() >= cnt);
772 
773             ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
774         }
775 
776         unsafe {
777             self.advance_mut(cnt);
778         }
779     }
780 
781     /// Absorbs a `BytesMut` that was previously split off.
782     ///
783     /// If the two `BytesMut` objects were previously contiguous and not mutated
784     /// in a way that causes re-allocation, i.e., if `other` was created by
785     /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
786     /// that just decreases a reference count and sets a few indices.
787     /// Otherwise this method degenerates to
788     /// `self.extend_from_slice(other.as_ref())`.
789     ///
790     /// # Examples
791     ///
792     /// ```
793     /// use bytes::BytesMut;
794     ///
795     /// let mut buf = BytesMut::with_capacity(64);
796     /// buf.extend_from_slice(b"aaabbbcccddd");
797     ///
798     /// let split = buf.split_off(6);
799     /// assert_eq!(b"aaabbb", &buf[..]);
800     /// assert_eq!(b"cccddd", &split[..]);
801     ///
802     /// buf.unsplit(split);
803     /// assert_eq!(b"aaabbbcccddd", &buf[..]);
804     /// ```
805     pub fn unsplit(&mut self, other: BytesMut) {
806         if self.is_empty() {
807             *self = other;
808             return;
809         }
810 
811         if let Err(other) = self.try_unsplit(other) {
812             self.extend_from_slice(other.as_ref());
813         }
814     }
815 
816     // private
817 
818     // For now, use a `Vec` to manage the memory for us, but we may want to
819     // change that in the future to some alternate allocator strategy.
820     //
821     // Thus, we don't expose an easy way to construct from a `Vec` since an
822     // internal change could make a simple pattern (`BytesMut::from(vec)`)
823     // suddenly a lot more expensive.
824     #[inline]
825     pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
826         let ptr = vptr(vec.as_mut_ptr());
827         let len = vec.len();
828         let cap = vec.capacity();
829         mem::forget(vec);
830 
831         let original_capacity_repr = original_capacity_to_repr(cap);
832         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
833 
834         BytesMut {
835             ptr,
836             len,
837             cap,
838             data: invalid_ptr(data),
839         }
840     }
841 
842     #[inline]
843     fn as_slice(&self) -> &[u8] {
844         unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
845     }
846 
847     #[inline]
848     fn as_slice_mut(&mut self) -> &mut [u8] {
849         unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
850     }
851 
852     unsafe fn set_start(&mut self, start: usize) {
853         // Setting the start to 0 is a no-op, so return early if this is the
854         // case.
855         if start == 0 {
856             return;
857         }
858 
859         debug_assert!(start <= self.cap, "internal: set_start out of bounds");
860 
861         let kind = self.kind();
862 
863         if kind == KIND_VEC {
864             // Setting the start when in vec representation is a little more
865             // complicated. First, we have to track how far ahead the
866             // "start" of the byte buffer is from the beginning of the vec. We
867             // also have to ensure that we don't exceed the maximum shift.
868             let (mut pos, prev) = self.get_vec_pos();
869             pos += start;
870 
871             if pos <= MAX_VEC_POS {
872                 self.set_vec_pos(pos, prev);
873             } else {
874                 // The repr must be upgraded to ARC. This will never happen
875                 // on 64 bit systems and will only happen on 32 bit systems
876                 // when shifting past 134,217,727 bytes. As such, we don't
877                 // worry too much about performance here.
878                 self.promote_to_shared(/*ref_count = */ 1);
879             }
880         }
881 
882         // Updating the start of the view is setting `ptr` to point to the
883         // new start and updating the `len` field to reflect the new length
884         // of the view.
885         self.ptr = vptr(self.ptr.as_ptr().add(start));
886 
887         if self.len >= start {
888             self.len -= start;
889         } else {
890             self.len = 0;
891         }
892 
893         self.cap -= start;
894     }
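// The figure quoted in the comment above follows from the constants at the
// top of the file: on a 32-bit target,
// MAX_VEC_POS = u32::MAX >> VEC_POS_OFFSET = (1 << 27) - 1 = 134_217_727.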
895 
896     unsafe fn set_end(&mut self, end: usize) {
897         debug_assert_eq!(self.kind(), KIND_ARC);
898         assert!(end <= self.cap, "set_end out of bounds");
899 
900         self.cap = end;
901         self.len = cmp::min(self.len, end);
902     }
903 
904     fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
905         if other.capacity() == 0 {
906             return Ok(());
907         }
908 
909         let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
910         if ptr == other.ptr.as_ptr()
911             && self.kind() == KIND_ARC
912             && other.kind() == KIND_ARC
913             && self.data == other.data
914         {
915             // Contiguous blocks, just combine directly
916             self.len += other.len;
917             self.cap += other.cap;
918             Ok(())
919         } else {
920             Err(other)
921         }
922     }
923 
924     #[inline]
925     fn kind(&self) -> usize {
926         self.data as usize & KIND_MASK
927     }
928 
929     unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
930         debug_assert_eq!(self.kind(), KIND_VEC);
931         debug_assert!(ref_cnt == 1 || ref_cnt == 2);
932 
933         let original_capacity_repr =
934             (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
935 
936         // The vec offset cannot be concurrently mutated, so there
937         // should be no danger reading it.
938         let off = (self.data as usize) >> VEC_POS_OFFSET;
939 
940         // First, allocate a new `Shared` instance containing the
941         // `Vec` fields. It's important to note that `ptr`, `len`,
942         // and `cap` cannot be mutated without having `&mut self`.
943         // This means that these fields will not be concurrently
944         // updated and since the buffer hasn't been promoted to an
945         // `Arc`, those three fields still are the components of the
946         // vector.
947         let shared = Box::new(Shared {
948             vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
949             original_capacity_repr,
950             ref_count: AtomicUsize::new(ref_cnt),
951         });
952 
953         let shared = Box::into_raw(shared);
954 
955         // The pointer should be aligned, so this assert should
956         // always succeed.
957         debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
958 
959         self.data = shared;
960     }
961 
962     /// Makes an exact shallow clone of `self`.
963     ///
964     /// The kind of `self` doesn't matter, but this is unsafe
965     /// because the clone will have the same offsets. You must
966     /// make sure that, before the value is returned to the user, it does
967     /// not allow two overlapping views into the same range.
968     #[inline]
969     unsafe fn shallow_clone(&mut self) -> BytesMut {
970         if self.kind() == KIND_ARC {
971             increment_shared(self.data);
972             ptr::read(self)
973         } else {
974             self.promote_to_shared(/*ref_count = */ 2);
975             ptr::read(self)
976         }
977     }
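// Within this file, `shallow_clone` is called only from `split_off` and
// `split_to`, each of which immediately adjusts the two handles with
// `set_start`/`set_end` so that their views no longer overlap, restoring the
// uniqueness guarantee described in the `BytesMut` docs.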
978 
979     #[inline]
980     unsafe fn get_vec_pos(&mut self) -> (usize, usize) {
981         debug_assert_eq!(self.kind(), KIND_VEC);
982 
983         let prev = self.data as usize;
984         (prev >> VEC_POS_OFFSET, prev)
985     }
986 
987     #[inline]
988     unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) {
989         debug_assert_eq!(self.kind(), KIND_VEC);
990         debug_assert!(pos <= MAX_VEC_POS);
991 
992         self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
993     }
994 
995     /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
996     ///
997     /// The returned slice can be used to fill the buffer with data (e.g. by
998     /// reading from a file) before marking the data as initialized using the
999     /// [`set_len`] method.
1000     ///
1001     /// [`set_len`]: BytesMut::set_len
1002     ///
1003     /// # Examples
1004     ///
1005     /// ```
1006     /// use bytes::BytesMut;
1007     ///
1008     /// // Allocate buffer big enough for 10 bytes.
1009     /// let mut buf = BytesMut::with_capacity(10);
1010     ///
1011     /// // Fill in the first 3 elements.
1012     /// let uninit = buf.spare_capacity_mut();
1013     /// uninit[0].write(0);
1014     /// uninit[1].write(1);
1015     /// uninit[2].write(2);
1016     ///
1017     /// // Mark the first 3 bytes of the buffer as being initialized.
1018     /// unsafe {
1019     ///     buf.set_len(3);
1020     /// }
1021     ///
1022     /// assert_eq!(&buf[..], &[0, 1, 2]);
1023     /// ```
1024     #[inline]
1025     pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
1026         unsafe {
1027             let ptr = self.ptr.as_ptr().add(self.len);
1028             let len = self.cap - self.len;
1029 
1030             slice::from_raw_parts_mut(ptr.cast(), len)
1031         }
1032     }
1033 }
1034 
1035 impl Drop for BytesMut {
1036     fn drop(&mut self) {
1037         let kind = self.kind();
1038 
1039         if kind == KIND_VEC {
1040             unsafe {
1041                 let (off, _) = self.get_vec_pos();
1042 
1043                 // Vector storage, free the vector
1044                 let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
1045             }
1046         } else if kind == KIND_ARC {
1047             unsafe { release_shared(self.data) };
1048         }
1049     }
1050 }
1051 
1052 impl Buf for BytesMut {
1053     #[inline]
1054     fn remaining(&self) -> usize {
1055         self.len()
1056     }
1057 
1058     #[inline]
1059     fn chunk(&self) -> &[u8] {
1060         self.as_slice()
1061     }
1062 
1063     #[inline]
1064     fn advance(&mut self, cnt: usize) {
1065         assert!(
1066             cnt <= self.remaining(),
1067             "cannot advance past `remaining`: {:?} <= {:?}",
1068             cnt,
1069             self.remaining(),
1070         );
1071         unsafe {
1072             self.set_start(cnt);
1073         }
1074     }
1075 
1076     fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
1077         self.split_to(len).freeze()
1078     }
1079 }
1080 
1081 unsafe impl BufMut for BytesMut {
1082     #[inline]
1083     fn remaining_mut(&self) -> usize {
1084         usize::MAX - self.len()
1085     }
1086 
1087     #[inline]
1088     unsafe fn advance_mut(&mut self, cnt: usize) {
1089         let new_len = self.len() + cnt;
1090         assert!(
1091             new_len <= self.cap,
1092             "new_len = {}; capacity = {}",
1093             new_len,
1094             self.cap
1095         );
1096         self.len = new_len;
1097     }
1098 
1099     #[inline]
1100     fn chunk_mut(&mut self) -> &mut UninitSlice {
1101         if self.capacity() == self.len() {
1102             self.reserve(64);
1103         }
1104         UninitSlice::from_slice(self.spare_capacity_mut())
1105     }
1106 
1107     // Specialize these methods so they can skip checking `remaining_mut`
1108     // and `advance_mut`.
1109 
1110     fn put<T: crate::Buf>(&mut self, mut src: T)
1111     where
1112         Self: Sized,
1113     {
1114         while src.has_remaining() {
1115             let s = src.chunk();
1116             let l = s.len();
1117             self.extend_from_slice(s);
1118             src.advance(l);
1119         }
1120     }
1121 
1122     fn put_slice(&mut self, src: &[u8]) {
1123         self.extend_from_slice(src);
1124     }
1125 
1126     fn put_bytes(&mut self, val: u8, cnt: usize) {
1127         self.reserve(cnt);
1128         unsafe {
1129             let dst = self.spare_capacity_mut();
1130             // Reserved above
1131             debug_assert!(dst.len() >= cnt);
1132 
1133             ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
1134 
1135             self.advance_mut(cnt);
1136         }
1137     }
1138 }
1139 
1140 impl AsRef<[u8]> for BytesMut {
1141     #[inline]
1142     fn as_ref(&self) -> &[u8] {
1143         self.as_slice()
1144     }
1145 }
1146 
1147 impl Deref for BytesMut {
1148     type Target = [u8];
1149 
1150     #[inline]
1151     fn deref(&self) -> &[u8] {
1152         self.as_ref()
1153     }
1154 }
1155 
1156 impl AsMut<[u8]> for BytesMut {
1157     #[inline]
1158     fn as_mut(&mut self) -> &mut [u8] {
1159         self.as_slice_mut()
1160     }
1161 }
1162 
1163 impl DerefMut for BytesMut {
1164     #[inline]
1165     fn deref_mut(&mut self) -> &mut [u8] {
1166         self.as_mut()
1167     }
1168 }
1169 
1170 impl<'a> From<&'a [u8]> for BytesMut {
1171     fn from(src: &'a [u8]) -> BytesMut {
1172         BytesMut::from_vec(src.to_vec())
1173     }
1174 }
1175 
1176 impl<'a> From<&'a str> for BytesMut {
1177     fn from(src: &'a str) -> BytesMut {
1178         BytesMut::from(src.as_bytes())
1179     }
1180 }
1181 
1182 impl From<BytesMut> for Bytes {
1183     fn from(src: BytesMut) -> Bytes {
1184         src.freeze()
1185     }
1186 }
1187 
1188 impl PartialEq for BytesMut {
1189     fn eq(&self, other: &BytesMut) -> bool {
1190         self.as_slice() == other.as_slice()
1191     }
1192 }
1193 
1194 impl PartialOrd for BytesMut {
1195     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1196         self.as_slice().partial_cmp(other.as_slice())
1197     }
1198 }
1199 
1200 impl Ord for BytesMut {
1201     fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
1202         self.as_slice().cmp(other.as_slice())
1203     }
1204 }
1205 
1206 impl Eq for BytesMut {}
1207 
1208 impl Default for BytesMut {
1209     #[inline]
1210     fn default() -> BytesMut {
1211         BytesMut::new()
1212     }
1213 }
1214 
1215 impl hash::Hash for BytesMut {
1216     fn hash<H>(&self, state: &mut H)
1217     where
1218         H: hash::Hasher,
1219     {
1220         let s: &[u8] = self.as_ref();
1221         s.hash(state);
1222     }
1223 }
1224 
1225 impl Borrow<[u8]> for BytesMut {
1226     fn borrow(&self) -> &[u8] {
1227         self.as_ref()
1228     }
1229 }
1230 
1231 impl BorrowMut<[u8]> for BytesMut {
1232     fn borrow_mut(&mut self) -> &mut [u8] {
1233         self.as_mut()
1234     }
1235 }
1236 
1237 impl fmt::Write for BytesMut {
1238     #[inline]
1239     fn write_str(&mut self, s: &str) -> fmt::Result {
1240         if self.remaining_mut() >= s.len() {
1241             self.put_slice(s.as_bytes());
1242             Ok(())
1243         } else {
1244             Err(fmt::Error)
1245         }
1246     }
1247 
1248     #[inline]
1249     fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1250         fmt::write(self, args)
1251     }
1252 }
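// A minimal usage sketch of the `fmt::Write` impl above (hypothetical values,
// shown here as a comment): it lets callers format directly into a `BytesMut`
// with the `write!` macro instead of going through an intermediate `String`.
//
//     use bytes::BytesMut;
//     use std::fmt::Write;
//
//     let mut buf = BytesMut::new();
//     write!(buf, "id={} ratio={:.2}", 7, 0.5).unwrap();
//     assert_eq!(&buf[..], b"id=7 ratio=0.50");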
1253 
1254 impl Clone for BytesMut {
1255     fn clone(&self) -> BytesMut {
1256         BytesMut::from(&self[..])
1257     }
1258 }
1259 
1260 impl IntoIterator for BytesMut {
1261     type Item = u8;
1262     type IntoIter = IntoIter<BytesMut>;
1263 
1264     fn into_iter(self) -> Self::IntoIter {
1265         IntoIter::new(self)
1266     }
1267 }
1268 
1269 impl<'a> IntoIterator for &'a BytesMut {
1270     type Item = &'a u8;
1271     type IntoIter = core::slice::Iter<'a, u8>;
1272 
1273     fn into_iter(self) -> Self::IntoIter {
1274         self.as_ref().iter()
1275     }
1276 }
1277 
1278 impl Extend<u8> for BytesMut {
1279     fn extend<T>(&mut self, iter: T)
1280     where
1281         T: IntoIterator<Item = u8>,
1282     {
1283         let iter = iter.into_iter();
1284 
1285         let (lower, _) = iter.size_hint();
1286         self.reserve(lower);
1287 
1288         // TODO: optimize
1289         // 1. If self.kind() == KIND_VEC, use Vec::extend
1290         // 2. Make `reserve` inline-able
1291         for b in iter {
1292             self.reserve(1);
1293             self.put_u8(b);
1294         }
1295     }
1296 }
1297 
1298 impl<'a> Extend<&'a u8> for BytesMut {
1299     fn extend<T>(&mut self, iter: T)
1300     where
1301         T: IntoIterator<Item = &'a u8>,
1302     {
1303         self.extend(iter.into_iter().copied())
1304     }
1305 }
1306 
1307 impl Extend<Bytes> for BytesMut {
1308     fn extend<T>(&mut self, iter: T)
1309     where
1310         T: IntoIterator<Item = Bytes>,
1311     {
1312         for bytes in iter {
1313             self.extend_from_slice(&bytes)
1314         }
1315     }
1316 }
1317 
1318 impl FromIterator<u8> for BytesMut {
1319     fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1320         BytesMut::from_vec(Vec::from_iter(into_iter))
1321     }
1322 }
1323 
1324 impl<'a> FromIterator<&'a u8> for BytesMut {
1325     fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1326         BytesMut::from_iter(into_iter.into_iter().copied())
1327     }
1328 }
1329 
1330 /*
1331  *
1332  * ===== Inner =====
1333  *
1334  */
1335 
1336 unsafe fn increment_shared(ptr: *mut Shared) {
1337     let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
1338 
1339     if old_size > isize::MAX as usize {
1340         crate::abort();
1341     }
1342 }
1343 
1344 unsafe fn release_shared(ptr: *mut Shared) {
1345     // `Shared` storage... follow the drop steps from Arc.
1346     if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
1347         return;
1348     }
1349 
1350     // This fence is needed to prevent reordering of use of the data and
1351     // deletion of the data.  Because it is marked `Release`, the decreasing
1352     // of the reference count synchronizes with this `Acquire` fence. This
1353     // means that use of the data happens before decreasing the reference
1354     // count, which happens before this fence, which happens before the
1355     // deletion of the data.
1356     //
1357     // As explained in the [Boost documentation][1],
1358     //
1359     // > It is important to enforce any possible access to the object in one
1360     // > thread (through an existing reference) to *happen before* deleting
1361     // > the object in a different thread. This is achieved by a "release"
1362     // > operation after dropping a reference (any access to the object
1363     // > through this reference must obviously happened before), and an
1364     // > "acquire" operation before deleting the object.
1365     //
1366     // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1367     //
1368     // Thread sanitizer does not support atomic fences. Use an atomic load
1369     // instead.
1370     (*ptr).ref_count.load(Ordering::Acquire);
1371 
1372     // Drop the data
1373     drop(Box::from_raw(ptr));
1374 }
1375 
1376 impl Shared {
1377     fn is_unique(&self) -> bool {
1378         // The goal is to check if the current handle is the only handle
1379         // that currently has access to the buffer. This is done by
1380         // checking if the `ref_count` is currently 1.
1381         //
1382         // The `Acquire` ordering synchronizes with the `Release` as
1383         // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
1384         // operation guarantees that any mutations done in other threads
1385         // are ordered before the `ref_count` is decremented. As such,
1386         // this `Acquire` will guarantee that those mutations are
1387         // visible to the current thread.
1388         self.ref_count.load(Ordering::Acquire) == 1
1389     }
1390 }
1391 
1392 #[inline]
1393 fn original_capacity_to_repr(cap: usize) -> usize {
1394     let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
1395     cmp::min(
1396         width,
1397         MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
1398     )
1399 }
1400 
1401 fn original_capacity_from_repr(repr: usize) -> usize {
1402     if repr == 0 {
1403         return 0;
1404     }
1405 
1406     1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
1407 }
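// A worked example of the round trip between the two helpers above, for an
// original capacity of 8 KiB (8192 = 1 << 13) on a 64-bit target:
//
//     original_capacity_to_repr(8192)
//         = min(64 - (8192 >> 10).leading_zeros(), 7)    // 8192 >> 10 == 8
//         = min(64 - 60, 7)
//         = 4
//     original_capacity_from_repr(4) = 1 << (4 + 9) = 8192
//
// Capacities below 1 KiB map to repr 0, and anything at or above 64 KiB
// saturates at repr 7 (64 KiB).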
1408 
1409 /*
1410 #[test]
1411 fn test_original_capacity_to_repr() {
1412     assert_eq!(original_capacity_to_repr(0), 0);
1413 
1414     let max_width = 32;
1415 
1416     for width in 1..(max_width + 1) {
1417         let cap = 1 << width - 1;
1418 
1419         let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
1420             0
1421         } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
1422             width - MIN_ORIGINAL_CAPACITY_WIDTH
1423         } else {
1424             MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
1425         };
1426 
1427         assert_eq!(original_capacity_to_repr(cap), expected);
1428 
1429         if width > 1 {
1430             assert_eq!(original_capacity_to_repr(cap + 1), expected);
1431         }
1432 
1433         //  MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
1434         if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
1435             assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
1436             assert_eq!(original_capacity_to_repr(cap + 76), expected);
1437         } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
1438             assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
1439             assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
1440         }
1441     }
1442 }
1443 
1444 #[test]
1445 fn test_original_capacity_from_repr() {
1446     assert_eq!(0, original_capacity_from_repr(0));
1447 
1448     let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
1449 
1450     assert_eq!(min_cap, original_capacity_from_repr(1));
1451     assert_eq!(min_cap * 2, original_capacity_from_repr(2));
1452     assert_eq!(min_cap * 4, original_capacity_from_repr(3));
1453     assert_eq!(min_cap * 8, original_capacity_from_repr(4));
1454     assert_eq!(min_cap * 16, original_capacity_from_repr(5));
1455     assert_eq!(min_cap * 32, original_capacity_from_repr(6));
1456     assert_eq!(min_cap * 64, original_capacity_from_repr(7));
1457 }
1458 */
1459 
1460 unsafe impl Send for BytesMut {}
1461 unsafe impl Sync for BytesMut {}
1462 
1463 /*
1464  *
1465  * ===== PartialEq / PartialOrd =====
1466  *
1467  */
1468 
1469 impl PartialEq<[u8]> for BytesMut {
1470     fn eq(&self, other: &[u8]) -> bool {
1471         &**self == other
1472     }
1473 }
1474 
1475 impl PartialOrd<[u8]> for BytesMut {
1476     fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
1477         (**self).partial_cmp(other)
1478     }
1479 }
1480 
1481 impl PartialEq<BytesMut> for [u8] {
1482     fn eq(&self, other: &BytesMut) -> bool {
1483         *other == *self
1484     }
1485 }
1486 
1487 impl PartialOrd<BytesMut> for [u8] {
1488     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1489         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1490     }
1491 }
1492 
1493 impl PartialEq<str> for BytesMut {
1494     fn eq(&self, other: &str) -> bool {
1495         &**self == other.as_bytes()
1496     }
1497 }
1498 
1499 impl PartialOrd<str> for BytesMut {
1500     fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
1501         (**self).partial_cmp(other.as_bytes())
1502     }
1503 }
1504 
1505 impl PartialEq<BytesMut> for str {
1506     fn eq(&self, other: &BytesMut) -> bool {
1507         *other == *self
1508     }
1509 }
1510 
1511 impl PartialOrd<BytesMut> for str {
1512     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1513         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1514     }
1515 }
1516 
1517 impl PartialEq<Vec<u8>> for BytesMut {
1518     fn eq(&self, other: &Vec<u8>) -> bool {
1519         *self == other[..]
1520     }
1521 }
1522 
1523 impl PartialOrd<Vec<u8>> for BytesMut {
1524     fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
1525         (**self).partial_cmp(&other[..])
1526     }
1527 }
1528 
1529 impl PartialEq<BytesMut> for Vec<u8> {
1530     fn eq(&self, other: &BytesMut) -> bool {
1531         *other == *self
1532     }
1533 }
1534 
1535 impl PartialOrd<BytesMut> for Vec<u8> {
1536     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1537         other.partial_cmp(self)
1538     }
1539 }
1540 
1541 impl PartialEq<String> for BytesMut {
1542     fn eq(&self, other: &String) -> bool {
1543         *self == other[..]
1544     }
1545 }
1546 
1547 impl PartialOrd<String> for BytesMut {
1548     fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
1549         (**self).partial_cmp(other.as_bytes())
1550     }
1551 }
1552 
1553 impl PartialEq<BytesMut> for String {
1554     fn eq(&self, other: &BytesMut) -> bool {
1555         *other == *self
1556     }
1557 }
1558 
1559 impl PartialOrd<BytesMut> for String {
1560     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1561         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1562     }
1563 }
1564 
1565 impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
1566 where
1567     BytesMut: PartialEq<T>,
1568 {
eq(&self, other: &&'a T) -> bool1569     fn eq(&self, other: &&'a T) -> bool {
1570         *self == **other
1571     }
1572 }
1573 
1574 impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
1575 where
1576     BytesMut: PartialOrd<T>,
1577 {
partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering>1578     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
1579         self.partial_cmp(*other)
1580     }
1581 }
1582 
1583 impl PartialEq<BytesMut> for &[u8] {
eq(&self, other: &BytesMut) -> bool1584     fn eq(&self, other: &BytesMut) -> bool {
1585         *other == *self
1586     }
1587 }
1588 
1589 impl PartialOrd<BytesMut> for &[u8] {
partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering>1590     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1591         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1592     }
1593 }
1594 
1595 impl PartialEq<BytesMut> for &str {
eq(&self, other: &BytesMut) -> bool1596     fn eq(&self, other: &BytesMut) -> bool {
1597         *other == *self
1598     }
1599 }
1600 
impl PartialOrd<BytesMut> for &str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        // Compare as `self` vs `other`, not the reverse, so the ordering is
        // consistent with `BytesMut: PartialOrd<str>` above.
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<BytesMut> for Bytes {
    fn eq(&self, other: &BytesMut) -> bool {
        other[..] == self[..]
    }
}

impl PartialEq<Bytes> for BytesMut {
    fn eq(&self, other: &Bytes) -> bool {
        other[..] == self[..]
    }
}

impl From<BytesMut> for Vec<u8> {
    fn from(mut bytes: BytesMut) -> Self {
        let kind = bytes.kind();

        let mut vec = if kind == KIND_VEC {
            // Storage was never promoted to `Shared`: reconstruct the backing
            // `Vec` from its raw parts, undoing any stored leading offset.
            unsafe {
                let (off, _) = bytes.get_vec_pos();
                rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
            }
        } else if kind == KIND_ARC {
            let shared = bytes.data as *mut Shared;

            if unsafe { (*shared).is_unique() } {
                // Sole owner of the shared storage: steal the `Vec` out of
                // `Shared`, then release the now-empty shared state.
                let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());

                unsafe { release_shared(shared) };

                vec
            } else {
                // Storage is shared with other handles; fall back to copying.
                return bytes.deref().to_vec();
            }
        } else {
            return bytes.deref().to_vec();
        };

        let len = bytes.len;

        unsafe {
            // Move the visible bytes to the front of the reclaimed buffer and
            // fix up its length.
            ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
            vec.set_len(len);
        }

        // The buffer is now owned by `vec`; skip `BytesMut`'s destructor.
        mem::forget(bytes);

        vec
    }
}
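
// Illustrative doc-test (added for exposition; not part of the upstream
// source): `Vec<u8>` can be produced from a `BytesMut` via the `From` impl
// above. When the `BytesMut` is the sole handle to its storage, the
// conversion can reuse the existing allocation rather than copying. The host
// fn name `_into_vec_example` is arbitrary.
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::with_capacity(16);
/// buf.extend_from_slice(b"hello");
///
/// let vec: Vec<u8> = buf.into();
/// assert_eq!(vec, b"hello".to_vec());
/// ```
fn _into_vec_example() {}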

#[inline]
fn vptr(ptr: *mut u8) -> NonNull<u8> {
    if cfg!(debug_assertions) {
        NonNull::new(ptr).expect("Vec pointer should be non-null")
    } else {
        unsafe { NonNull::new_unchecked(ptr) }
    }
}

/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
///
/// It is equivalent to `addr as *mut T`, but that cast fails on Miri when
/// strict provenance checking is enabled.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}
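
// Illustrative doc-test (added for exposition; not part of the upstream
// source): `invalid_ptr` is private, so this demonstrates the same
// address round-trip directly. Using `wrapping_add` on a null pointer avoids
// the plain `addr as *mut T` cast that, as noted above, trips Miri's strict
// provenance checking. The host fn name `_invalid_ptr_example` is arbitrary.
/// ```
/// // Materialize an integer tag as a pointer, then read the address back.
/// let addr = 0b1usize;
/// let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr).cast::<()>();
/// assert_eq!(ptr as usize, addr);
/// ```
fn _invalid_ptr_example() {}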

/// Precondition: `dst >= original`
///
/// The following line is equivalent to:
///
/// ```rust,ignore
/// self.ptr.as_ptr().offset_from(ptr) as usize;
/// ```
///
/// But because the minimum supported Rust version is 1.39 and `offset_from`
/// was only stabilized in 1.47, we cannot use it.
#[inline]
fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
    debug_assert!(dst >= original);

    dst as usize - original as usize
}
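
// Illustrative doc-test (added for exposition; not part of the upstream
// source): `offset_from` is private, so this shows the same address
// arithmetic on a plain buffer. The host fn name `_offset_from_example` is
// arbitrary.
/// ```
/// let buf = [0u8; 8];
/// let original = buf.as_ptr();
/// let dst = buf[3..].as_ptr();
/// // With `dst >= original`, the byte offset is just the address difference.
/// assert_eq!(dst as usize - original as usize, 3);
/// ```
fn _offset_from_example() {}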

/// Rebuilds the original backing `Vec` of a `KIND_VEC` `BytesMut` from the
/// current data pointer, length, capacity, and stored leading offset.
unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
    let ptr = ptr.offset(-(off as isize));
    len += off;
    cap += off;

    Vec::from_raw_parts(ptr, len, cap)
}

// ===== impl SharedVtable =====

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_v_clone,
    to_vec: shared_v_to_vec,
    drop: shared_v_drop,
};

unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed) as *mut Shared;
    increment_shared(shared);

    let data = AtomicPtr::new(shared as *mut ());
    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}

unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let shared: *mut Shared = data.load(Ordering::Relaxed).cast();

    if (*shared).is_unique() {
        let shared = &mut *shared;

        // Drop shared
        let mut vec = mem::replace(&mut shared.vec, Vec::new());
        release_shared(shared);

        // Copy back buffer
        ptr::copy(ptr, vec.as_mut_ptr(), len);
        vec.set_len(len);

        vec
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}

// compile-fails

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split();
/// }
/// ```
fn _split_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::BytesMut;
    use crate::Bytes;

    #[test]
    fn bytes_mut_cloning_frozen() {
        loom::model(|| {
            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
            let addr = a.as_ptr() as usize;

            // test that `Bytes::clone` is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}