1 use core::iter::{FromIterator, Iterator};
2 use core::mem::{self, ManuallyDrop, MaybeUninit};
3 use core::ops::{Deref, DerefMut};
4 use core::ptr::{self, NonNull};
5 use core::{cmp, fmt, hash, isize, slice, usize};
6 
7 use alloc::{
8     borrow::{Borrow, BorrowMut},
9     boxed::Box,
10     string::String,
11     vec,
12     vec::Vec,
13 };
14 
15 use crate::buf::{IntoIter, UninitSlice};
16 use crate::bytes::Vtable;
17 #[allow(unused)]
18 use crate::loom::sync::atomic::AtomicMut;
19 use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
20 use crate::{Buf, BufMut, Bytes};
21 
22 /// A unique reference to a contiguous slice of memory.
23 ///
24 /// `BytesMut` represents a unique view into a potentially shared memory region.
25 /// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
26 /// mutate the memory.
27 ///
28 /// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
29 /// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
30 /// same `buf` overlaps with its slice. That guarantee means that a write lock
31 /// is not required.
32 ///
33 /// # Growth
34 ///
35 /// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
36 /// necessary. However, explicitly reserving the required space up-front before
37 /// a series of inserts will be more efficient.
38 ///
39 /// # Examples
40 ///
41 /// ```
42 /// use bytes::{BytesMut, BufMut};
43 ///
44 /// let mut buf = BytesMut::with_capacity(64);
45 ///
46 /// buf.put_u8(b'h');
47 /// buf.put_u8(b'e');
48 /// buf.put(&b"llo"[..]);
49 ///
50 /// assert_eq!(&buf[..], b"hello");
51 ///
52 /// // Freeze the buffer so that it can be shared
53 /// let a = buf.freeze();
54 ///
55 /// // This does not allocate, instead `b` points to the same memory.
56 /// let b = a.clone();
57 ///
58 /// assert_eq!(&a[..], b"hello");
59 /// assert_eq!(&b[..], b"hello");
60 /// ```
61 pub struct BytesMut {
62     ptr: NonNull<u8>,
63     len: usize,
64     cap: usize,
65     data: *mut Shared,
66 }
67 
68 // Thread-safe reference-counted container for the shared storage. This is
69 // mostly the same as `std::sync::Arc` but without the weak counter. The ref
70 // counting fns are based on the ones found in `std`.
71 //
72 // The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
73 // up making the overall code simpler and easier to reason about. This is due to
74 // some of the logic around setting `Inner::arc` and other ways the `arc` field
75 // is used. Using `Arc` ended up requiring a number of funky transmutes and
76 // other shenanigans to make it work.
77 struct Shared {
78     vec: Vec<u8>,
79     original_capacity_repr: usize,
80     ref_count: AtomicUsize,
81 }
82 
83 // Buffer storage strategy flags.
84 const KIND_ARC: usize = 0b0;
85 const KIND_VEC: usize = 0b1;
86 const KIND_MASK: usize = 0b1;
87 
88 // The max original capacity value. Any `Bytes` allocated with a greater initial
89 // capacity will default to this.
90 const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
91 // The original capacity algorithm will not take effect unless the originally
92 // allocated capacity was at least 1kb in size.
93 const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
94 // The original capacity is stored in powers of 2 starting at 1kb to a max of
95 // 64kb. Representing it as such requires only 3 bits of storage.
96 const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
97 const ORIGINAL_CAPACITY_OFFSET: usize = 2;
98 
99 // When the storage is in the `Vec` representation, the pointer can be advanced
100 // by at most this value. This is because the storage available to track the
101 // offset is a `usize` minus the number of KIND bits and the number of
102 // ORIGINAL_CAPACITY bits.
103 const VEC_POS_OFFSET: usize = 5;
104 const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
105 const NOT_VEC_POS_MASK: usize = 0b11111;
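// Illustrative sketch of how the `data` field packs its metadata while in the
// `KIND_VEC` representation (the values below are hypothetical and only meant
// to show the bit layout implied by the constants above):
//
//     let pos = 3usize;   // how far the view has advanced into the `Vec`
//     let repr = 2usize;  // original-capacity representation (3 bits)
//     let data = (pos << VEC_POS_OFFSET)
//         | (repr << ORIGINAL_CAPACITY_OFFSET)
//         | KIND_VEC;                                            // 0b110_1001
//     assert_eq!(data & KIND_MASK, KIND_VEC);
//     assert_eq!((data & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET, repr);
//     assert_eq!(data >> VEC_POS_OFFSET, pos);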
106 
107 #[cfg(target_pointer_width = "64")]
108 const PTR_WIDTH: usize = 64;
109 #[cfg(target_pointer_width = "32")]
110 const PTR_WIDTH: usize = 32;
111 
112 /*
113  *
114  * ===== BytesMut =====
115  *
116  */
117 
118 impl BytesMut {
119     /// Creates a new `BytesMut` with the specified capacity.
120     ///
121     /// The returned `BytesMut` will be able to hold at least `capacity` bytes
122     /// without reallocating.
123     ///
124     /// It is important to note that this function does not specify the length
125     /// of the returned `BytesMut`, but only the capacity.
126     ///
127     /// # Examples
128     ///
129     /// ```
130     /// use bytes::{BytesMut, BufMut};
131     ///
132     /// let mut bytes = BytesMut::with_capacity(64);
133     ///
134     /// // `bytes` contains no data, even though there is capacity
135     /// assert_eq!(bytes.len(), 0);
136     ///
137     /// bytes.put(&b"hello world"[..]);
138     ///
139     /// assert_eq!(&bytes[..], b"hello world");
140     /// ```
141     #[inline]
142     pub fn with_capacity(capacity: usize) -> BytesMut {
143         BytesMut::from_vec(Vec::with_capacity(capacity))
144     }
145 
146     /// Creates a new `BytesMut` with default capacity.
147     ///
148     /// The resulting object has a length of 0 and an unspecified capacity.
149     /// This function does not allocate.
150     ///
151     /// # Examples
152     ///
153     /// ```
154     /// use bytes::{BytesMut, BufMut};
155     ///
156     /// let mut bytes = BytesMut::new();
157     ///
158     /// assert_eq!(0, bytes.len());
159     ///
160     /// bytes.reserve(2);
161     /// bytes.put_slice(b"xy");
162     ///
163     /// assert_eq!(&b"xy"[..], &bytes[..]);
164     /// ```
165     #[inline]
166     pub fn new() -> BytesMut {
167         BytesMut::with_capacity(0)
168     }
169 
170     /// Returns the number of bytes contained in this `BytesMut`.
171     ///
172     /// # Examples
173     ///
174     /// ```
175     /// use bytes::BytesMut;
176     ///
177     /// let b = BytesMut::from(&b"hello"[..]);
178     /// assert_eq!(b.len(), 5);
179     /// ```
180     #[inline]
181     pub fn len(&self) -> usize {
182         self.len
183     }
184 
185     /// Returns true if the `BytesMut` has a length of 0.
186     ///
187     /// # Examples
188     ///
189     /// ```
190     /// use bytes::BytesMut;
191     ///
192     /// let b = BytesMut::with_capacity(64);
193     /// assert!(b.is_empty());
194     /// ```
195     #[inline]
196     pub fn is_empty(&self) -> bool {
197         self.len == 0
198     }
199 
200     /// Returns the number of bytes the `BytesMut` can hold without reallocating.
201     ///
202     /// # Examples
203     ///
204     /// ```
205     /// use bytes::BytesMut;
206     ///
207     /// let b = BytesMut::with_capacity(64);
208     /// assert_eq!(b.capacity(), 64);
209     /// ```
210     #[inline]
211     pub fn capacity(&self) -> usize {
212         self.cap
213     }
214 
215     /// Converts `self` into an immutable `Bytes`.
216     ///
217     /// The conversion is zero cost and is used to indicate that the slice
218     /// referenced by the handle will no longer be mutated. Once the conversion
219     /// is done, the handle can be cloned and shared across threads.
220     ///
221     /// # Examples
222     ///
223     /// ```
224     /// use bytes::{BytesMut, BufMut};
225     /// use std::thread;
226     ///
227     /// let mut b = BytesMut::with_capacity(64);
228     /// b.put(&b"hello world"[..]);
229     /// let b1 = b.freeze();
230     /// let b2 = b1.clone();
231     ///
232     /// let th = thread::spawn(move || {
233     ///     assert_eq!(&b1[..], b"hello world");
234     /// });
235     ///
236     /// assert_eq!(&b2[..], b"hello world");
237     /// th.join().unwrap();
238     /// ```
239     #[inline]
240     pub fn freeze(mut self) -> Bytes {
241         if self.kind() == KIND_VEC {
242             // Just re-use `Bytes` internal Vec vtable
243             unsafe {
244                 let (off, _) = self.get_vec_pos();
245                 let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
246                 mem::forget(self);
247                 let mut b: Bytes = vec.into();
248                 b.advance(off);
249                 b
250             }
251         } else {
252             debug_assert_eq!(self.kind(), KIND_ARC);
253 
254             let ptr = self.ptr.as_ptr();
255             let len = self.len;
256             let data = AtomicPtr::new(self.data.cast());
257             mem::forget(self);
258             unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
259         }
260     }
261 
262     /// Creates a new `BytesMut` containing `len` bytes, all initialized to zero.
263     ///
264     /// # Examples
265     ///
266     /// ```
267     /// use bytes::BytesMut;
268     ///
269     /// let zeros = BytesMut::zeroed(42);
270     ///
271     /// assert_eq!(zeros.len(), 42);
272     /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
273     /// ```
274     pub fn zeroed(len: usize) -> BytesMut {
275         BytesMut::from_vec(vec![0; len])
276     }
277 
278     /// Splits the bytes into two at the given index.
279     ///
280     /// Afterwards `self` contains elements `[0, at)`, and the returned
281     /// `BytesMut` contains elements `[at, capacity)`.
282     ///
283     /// This is an `O(1)` operation that just increases the reference count
284     /// and sets a few indices.
285     ///
286     /// # Examples
287     ///
288     /// ```
289     /// use bytes::BytesMut;
290     ///
291     /// let mut a = BytesMut::from(&b"hello world"[..]);
292     /// let mut b = a.split_off(5);
293     ///
294     /// a[0] = b'j';
295     /// b[0] = b'!';
296     ///
297     /// assert_eq!(&a[..], b"jello");
298     /// assert_eq!(&b[..], b"!world");
299     /// ```
300     ///
301     /// # Panics
302     ///
303     /// Panics if `at > capacity`.
304     #[must_use = "consider BytesMut::truncate if you don't need the other half"]
305     pub fn split_off(&mut self, at: usize) -> BytesMut {
306         assert!(
307             at <= self.capacity(),
308             "split_off out of bounds: {:?} <= {:?}",
309             at,
310             self.capacity(),
311         );
312         unsafe {
313             let mut other = self.shallow_clone();
314             other.set_start(at);
315             self.set_end(at);
316             other
317         }
318     }
319 
320     /// Removes the bytes from the current view, returning them in a new
321     /// `BytesMut` handle.
322     ///
323     /// Afterwards, `self` will be empty, but will retain any additional
324     /// capacity that it had before the operation. This is identical to
325     /// `self.split_to(self.len())`.
326     ///
327     /// This is an `O(1)` operation that just increases the reference count and
328     /// sets a few indices.
329     ///
330     /// # Examples
331     ///
332     /// ```
333     /// use bytes::{BytesMut, BufMut};
334     ///
335     /// let mut buf = BytesMut::with_capacity(1024);
336     /// buf.put(&b"hello world"[..]);
337     ///
338     /// let other = buf.split();
339     ///
340     /// assert!(buf.is_empty());
341     /// assert_eq!(1013, buf.capacity());
342     ///
343     /// assert_eq!(other, b"hello world"[..]);
344     /// ```
345     #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
346     pub fn split(&mut self) -> BytesMut {
347         let len = self.len();
348         self.split_to(len)
349     }
350 
351     /// Splits the buffer into two at the given index.
352     ///
353     /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
354     /// contains elements `[0, at)`.
355     ///
356     /// This is an `O(1)` operation that just increases the reference count and
357     /// sets a few indices.
358     ///
359     /// # Examples
360     ///
361     /// ```
362     /// use bytes::BytesMut;
363     ///
364     /// let mut a = BytesMut::from(&b"hello world"[..]);
365     /// let mut b = a.split_to(5);
366     ///
367     /// a[0] = b'!';
368     /// b[0] = b'j';
369     ///
370     /// assert_eq!(&a[..], b"!world");
371     /// assert_eq!(&b[..], b"jello");
372     /// ```
373     ///
374     /// # Panics
375     ///
376     /// Panics if `at > len`.
377     #[must_use = "consider BytesMut::advance if you don't need the other half"]
378     pub fn split_to(&mut self, at: usize) -> BytesMut {
379         assert!(
380             at <= self.len(),
381             "split_to out of bounds: {:?} <= {:?}",
382             at,
383             self.len(),
384         );
385 
386         unsafe {
387             let mut other = self.shallow_clone();
388             other.set_end(at);
389             self.set_start(at);
390             other
391         }
392     }
393 
394     /// Shortens the buffer, keeping the first `len` bytes and dropping the
395     /// rest.
396     ///
397     /// If `len` is greater than the buffer's current length, this has no
398     /// effect.
399     ///
400     /// Existing underlying capacity is preserved.
401     ///
402     /// The [`split_off`] method can emulate `truncate`, but this causes the
403     /// excess bytes to be returned instead of dropped.
404     ///
405     /// # Examples
406     ///
407     /// ```
408     /// use bytes::BytesMut;
409     ///
410     /// let mut buf = BytesMut::from(&b"hello world"[..]);
411     /// buf.truncate(5);
412     /// assert_eq!(buf, b"hello"[..]);
413     /// ```
414     ///
415     /// [`split_off`]: #method.split_off
416     pub fn truncate(&mut self, len: usize) {
417         if len <= self.len() {
418             unsafe {
419                 self.set_len(len);
420             }
421         }
422     }
423 
424     /// Clears the buffer, removing all data. Existing capacity is preserved.
425     ///
426     /// # Examples
427     ///
428     /// ```
429     /// use bytes::BytesMut;
430     ///
431     /// let mut buf = BytesMut::from(&b"hello world"[..]);
432     /// buf.clear();
433     /// assert!(buf.is_empty());
434     /// ```
435     pub fn clear(&mut self) {
436         self.truncate(0);
437     }
438 
439     /// Resizes the buffer so that `len` is equal to `new_len`.
440     ///
441     /// If `new_len` is greater than `len`, the buffer is extended by the
442     /// difference with each additional byte set to `value`. If `new_len` is
443     /// less than `len`, the buffer is simply truncated.
444     ///
445     /// # Examples
446     ///
447     /// ```
448     /// use bytes::BytesMut;
449     ///
450     /// let mut buf = BytesMut::new();
451     ///
452     /// buf.resize(3, 0x1);
453     /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
454     ///
455     /// buf.resize(2, 0x2);
456     /// assert_eq!(&buf[..], &[0x1, 0x1]);
457     ///
458     /// buf.resize(4, 0x3);
459     /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
460     /// ```
461     pub fn resize(&mut self, new_len: usize, value: u8) {
462         let len = self.len();
463         if new_len > len {
464             let additional = new_len - len;
465             self.reserve(additional);
466             unsafe {
467                 let dst = self.chunk_mut().as_mut_ptr();
468                 ptr::write_bytes(dst, value, additional);
469                 self.set_len(new_len);
470             }
471         } else {
472             self.truncate(new_len);
473         }
474     }
475 
476     /// Sets the length of the buffer.
477     ///
478     /// This will explicitly set the size of the buffer without actually
479     /// modifying the data, so it is up to the caller to ensure that the data
480     /// has been initialized.
481     ///
482     /// # Examples
483     ///
484     /// ```
485     /// use bytes::BytesMut;
486     ///
487     /// let mut b = BytesMut::from(&b"hello world"[..]);
488     ///
489     /// unsafe {
490     ///     b.set_len(5);
491     /// }
492     ///
493     /// assert_eq!(&b[..], b"hello");
494     ///
495     /// unsafe {
496     ///     b.set_len(11);
497     /// }
498     ///
499     /// assert_eq!(&b[..], b"hello world");
500     /// ```
501     #[inline]
502     pub unsafe fn set_len(&mut self, len: usize) {
503         debug_assert!(len <= self.cap, "set_len out of bounds");
504         self.len = len;
505     }
506 
507     /// Reserves capacity for at least `additional` more bytes to be inserted
508     /// into the given `BytesMut`.
509     ///
510     /// More than `additional` bytes may be reserved in order to avoid frequent
511     /// reallocations. A call to `reserve` may result in an allocation.
512     ///
513     /// Before allocating new buffer space, the function will attempt to reclaim
514     /// space in the existing buffer. If the current handle references a view
515     /// into a larger original buffer, and all other handles referencing part
516     /// of the same original buffer have been dropped, then the current view
517     /// can be copied/shifted to the front of the buffer and the handle can take
518     /// ownership of the full buffer, provided that the full buffer is large
519     /// enough to fit the requested additional capacity.
520     ///
521     /// This optimization will only happen if shifting the data from the current
522     /// view to the front of the buffer is not too expensive in terms of the
523     /// (amortized) time required. The precise condition is subject to change;
524     /// as of now, the length of the data being shifted needs to be at least as
525     /// large as the distance that it's shifted by. If the current view is empty
526     /// and the original buffer is large enough to fit the requested additional
527     /// capacity, then reallocations will never happen.
528     ///
529     /// # Examples
530     ///
531     /// In the following example, a new buffer is allocated.
532     ///
533     /// ```
534     /// use bytes::BytesMut;
535     ///
536     /// let mut buf = BytesMut::from(&b"hello"[..]);
537     /// buf.reserve(64);
538     /// assert!(buf.capacity() >= 69);
539     /// ```
540     ///
541     /// In the following example, the existing buffer is reclaimed.
542     ///
543     /// ```
544     /// use bytes::{BytesMut, BufMut};
545     ///
546     /// let mut buf = BytesMut::with_capacity(128);
547     /// buf.put(&[0; 64][..]);
548     ///
549     /// let ptr = buf.as_ptr();
550     /// let other = buf.split();
551     ///
552     /// assert!(buf.is_empty());
553     /// assert_eq!(buf.capacity(), 64);
554     ///
555     /// drop(other);
556     /// buf.reserve(128);
557     ///
558     /// assert_eq!(buf.capacity(), 128);
559     /// assert_eq!(buf.as_ptr(), ptr);
560     /// ```
561     ///
562     /// # Panics
563     ///
564     /// Panics if the new capacity overflows `usize`.
565     #[inline]
566     pub fn reserve(&mut self, additional: usize) {
567         let len = self.len();
568         let rem = self.capacity() - len;
569 
570         if additional <= rem {
571             // The handle can already store at least `additional` more bytes, so
572             // there is no further work needed to be done.
573             return;
574         }
575 
576         self.reserve_inner(additional);
577     }
578 
579     // In a separate function to allow the short-circuits in `reserve` to
580     // be inline-able. This significantly helps performance.
581     fn reserve_inner(&mut self, additional: usize) {
582         let len = self.len();
583         let kind = self.kind();
584 
585         if kind == KIND_VEC {
586             // If there's enough free space before the start of the buffer, then
587             // just copy the data backwards and reuse the already-allocated
588             // space.
589             //
590             // Otherwise, since backed by a vector, use `Vec::reserve`
591             //
592             // We need to make sure that this optimization does not kill the
593             // amortized runtimes of BytesMut's operations.
594             unsafe {
595                 let (off, prev) = self.get_vec_pos();
596 
597                 // Only reuse space if we can satisfy the requested additional space.
598                 //
599                 // Also check if the value of `off` suggests that enough bytes
600                 // have been read to account for the overhead of shifting all
601                 // the data (in an amortized analysis).
602                 // Hence the condition `off >= self.len()`.
603                 //
604                 // This condition also already implies that the buffer is going
605                 // to be (at least) half-empty in the end; so we do not break
606                 // the (amortized) runtime with future resizes of the underlying
607                 // `Vec`.
608                 //
609                 // [For more details check issue #524, and PR #525.]
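                // Worked example (illustrative): with a backing vector of
                // capacity 64, `off == 48`, `self.len() == 16` and
                // `additional == 40`, we get
                // `capacity() - len() + off == (64 - 48) - 16 + 48 == 48`,
                // which covers the request, and `off >= len()` holds; so the
                // 16 live bytes are copied back to the start and the handle
                // regains the full 64-byte capacity instead of reallocating.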
610                 if self.capacity() - self.len() + off >= additional && off >= self.len() {
611                     // There's enough space, and it's not too much overhead:
612                     // reuse the space!
613                     //
614                     // Just move the pointer back to the start after copying
615                     // data back.
616                     let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
617                     // Since `off >= self.len()`, the two regions don't overlap.
618                     ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
619                     self.ptr = vptr(base_ptr);
620                     self.set_vec_pos(0, prev);
621 
622                     // Length stays constant, but since we moved backwards we
623                     // can gain capacity back.
624                     self.cap += off;
625                 } else {
626                     // Not enough space, or reusing might be too much overhead:
627                     // allocate more space!
628                     let mut v =
629                         ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
630                     v.reserve(additional);
631 
632                     // Update the info
633                     self.ptr = vptr(v.as_mut_ptr().add(off));
634                     self.len = v.len() - off;
635                     self.cap = v.capacity() - off;
636                 }
637 
638                 return;
639             }
640         }
641 
642         debug_assert_eq!(kind, KIND_ARC);
643         let shared: *mut Shared = self.data;
644 
645         // Reserving involves abandoning the currently shared buffer and
646         // allocating a new vector with the requested capacity.
647         //
648         // Compute the new capacity
649         let mut new_cap = len.checked_add(additional).expect("overflow");
650 
651         let original_capacity;
652         let original_capacity_repr;
653 
654         unsafe {
655             original_capacity_repr = (*shared).original_capacity_repr;
656             original_capacity = original_capacity_from_repr(original_capacity_repr);
657 
658             // First, try to reclaim the buffer. This is possible if the current
659             // handle is the only outstanding handle pointing to the buffer.
660             if (*shared).is_unique() {
661                 // This is the only handle to the buffer. It can be reclaimed.
662                 // However, before doing the work of copying data, check to make
663                 // sure that the vector has enough capacity.
664                 let v = &mut (*shared).vec;
665 
666                 let v_capacity = v.capacity();
667                 let ptr = v.as_mut_ptr();
668 
669                 let offset = offset_from(self.ptr.as_ptr(), ptr);
670 
671                 // Compare the condition in the `kind == KIND_VEC` case above
672                 // for more details.
673                 if v_capacity >= new_cap + offset {
674                     self.cap = new_cap;
675                     // no copy is necessary
676                 } else if v_capacity >= new_cap && offset >= len {
677                     // The capacity is sufficient, and copying is not too much
678                     // overhead: reclaim the buffer!
679 
680                     // `offset >= len` means: no overlap
681                     ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
682 
683                     self.ptr = vptr(ptr);
684                     self.cap = v.capacity();
685                 } else {
686                     // calculate offset
687                     let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
688 
689                     // new_cap is calculated in terms of `BytesMut`, not the underlying
690                     // `Vec`, so it does not take the offset into account.
691                     //
692                     // Thus we have to manually add it here.
693                     new_cap = new_cap.checked_add(off).expect("overflow");
694 
695                     // The vector capacity is not sufficient. The reserve request is
696                     // asking for more than the initial buffer capacity. Allocate more
697                     // than requested if `new_cap` is not much bigger than the current
698                     // capacity.
699                     //
700                     // In some situations (e.g. when `reserve_exact` was used),
701                     // the buffer capacity could be below `original_capacity`, so
702                     // do a check.
703                     let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
704 
705                     new_cap = cmp::max(double, new_cap);
706 
707                     // No space - allocate more
708                     //
709                     // The length field of `Shared::vec` is not used by the `BytesMut`;
710                     // instead we use the `len` field in the `BytesMut` itself. However,
711                     // when calling `reserve`, it doesn't guarantee that data stored in
712                     // the unused capacity of the vector is copied over to the new
713                     // allocation, so we need to ensure that we don't have any data we
714                     // care about in the unused capacity before calling `reserve`.
715                     debug_assert!(off + len <= v.capacity());
716                     v.set_len(off + len);
717                     v.reserve(new_cap - v.len());
718 
719                     // Update the info
720                     self.ptr = vptr(v.as_mut_ptr().add(off));
721                     self.cap = v.capacity() - off;
722                 }
723 
724                 return;
725             } else {
726                 new_cap = cmp::max(new_cap, original_capacity);
727             }
728         }
729 
730         // Create a new vector to store the data
731         let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
732 
733         // Copy the bytes
734         v.extend_from_slice(self.as_ref());
735 
736         // Release the shared handle. This must be done *after* the bytes are
737         // copied.
738         unsafe { release_shared(shared) };
739 
740         // Update self
741         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
742         self.data = invalid_ptr(data);
743         self.ptr = vptr(v.as_mut_ptr());
744         self.len = v.len();
745         self.cap = v.capacity();
746     }
747 
748     /// Appends given bytes to this `BytesMut`.
749     ///
750     /// If this `BytesMut` object does not have enough capacity, it is resized
751     /// first.
752     ///
753     /// # Examples
754     ///
755     /// ```
756     /// use bytes::BytesMut;
757     ///
758     /// let mut buf = BytesMut::with_capacity(0);
759     /// buf.extend_from_slice(b"aaabbb");
760     /// buf.extend_from_slice(b"cccddd");
761     ///
762     /// assert_eq!(b"aaabbbcccddd", &buf[..]);
763     /// ```
764     #[inline]
765     pub fn extend_from_slice(&mut self, extend: &[u8]) {
766         let cnt = extend.len();
767         self.reserve(cnt);
768 
769         unsafe {
770             let dst = self.spare_capacity_mut();
771             // Reserved above
772             debug_assert!(dst.len() >= cnt);
773 
774             ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
775         }
776 
777         unsafe {
778             self.advance_mut(cnt);
779         }
780     }
781 
782     /// Absorbs a `BytesMut` that was previously split off.
783     ///
784     /// If the two `BytesMut` objects were previously contiguous and not mutated
785     /// in a way that causes re-allocation, i.e., if `other` was created by
786     /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
787     /// that just decreases a reference count and sets a few indices.
788     /// Otherwise this method degenerates to
789     /// `self.extend_from_slice(other.as_ref())`.
790     ///
791     /// # Examples
792     ///
793     /// ```
794     /// use bytes::BytesMut;
795     ///
796     /// let mut buf = BytesMut::with_capacity(64);
797     /// buf.extend_from_slice(b"aaabbbcccddd");
798     ///
799     /// let split = buf.split_off(6);
800     /// assert_eq!(b"aaabbb", &buf[..]);
801     /// assert_eq!(b"cccddd", &split[..]);
802     ///
803     /// buf.unsplit(split);
804     /// assert_eq!(b"aaabbbcccddd", &buf[..]);
805     /// ```
806     pub fn unsplit(&mut self, other: BytesMut) {
807         if self.is_empty() {
808             *self = other;
809             return;
810         }
811 
812         if let Err(other) = self.try_unsplit(other) {
813             self.extend_from_slice(other.as_ref());
814         }
815     }
816 
817     // private
818 
819     // For now, use a `Vec` to manage the memory for us, but we may want to
820     // change that in the future to some alternate allocator strategy.
821     //
822     // Thus, we don't expose an easy way to construct from a `Vec` since an
823     // internal change could make a simple pattern (`BytesMut::from(vec)`)
824     // suddenly a lot more expensive.
825     #[inline]
826     pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
827         let ptr = vptr(vec.as_mut_ptr());
828         let len = vec.len();
829         let cap = vec.capacity();
830         mem::forget(vec);
831 
832         let original_capacity_repr = original_capacity_to_repr(cap);
833         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
834 
835         BytesMut {
836             ptr,
837             len,
838             cap,
839             data: invalid_ptr(data),
840         }
841     }
842 
843     #[inline]
844     fn as_slice(&self) -> &[u8] {
845         unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
846     }
847 
848     #[inline]
849     fn as_slice_mut(&mut self) -> &mut [u8] {
850         unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
851     }
852 
853     unsafe fn set_start(&mut self, start: usize) {
854         // Setting the start to 0 is a no-op, so return early if this is the
855         // case.
856         if start == 0 {
857             return;
858         }
859 
860         debug_assert!(start <= self.cap, "internal: set_start out of bounds");
861 
862         let kind = self.kind();
863 
864         if kind == KIND_VEC {
865             // Setting the start when in the vec representation is a little
866             // more complicated. First, we have to track how far ahead the
867             // "start" of the byte buffer is from the beginning of the vec.
868             // We also have to ensure that we don't exceed the maximum shift.
869             let (mut pos, prev) = self.get_vec_pos();
870             pos += start;
871 
872             if pos <= MAX_VEC_POS {
873                 self.set_vec_pos(pos, prev);
874             } else {
875                 // The repr must be upgraded to ARC. This will never happen
876                 // on 64 bit systems and will only happen on 32 bit systems
877                 // when shifting past 134,217,727 bytes. As such, we don't
878                 // worry too much about performance here.
879                 self.promote_to_shared(/*ref_count = */ 1);
880             }
881         }
882 
883         // Updating the start of the view is setting `ptr` to point to the
884         // new start and updating the `len` field to reflect the new length
885         // of the view.
886         self.ptr = vptr(self.ptr.as_ptr().add(start));
887 
888         if self.len >= start {
889             self.len -= start;
890         } else {
891             self.len = 0;
892         }
893 
894         self.cap -= start;
895     }
896 
897     unsafe fn set_end(&mut self, end: usize) {
898         debug_assert_eq!(self.kind(), KIND_ARC);
899         assert!(end <= self.cap, "set_end out of bounds");
900 
901         self.cap = end;
902         self.len = cmp::min(self.len, end);
903     }
904 
905     fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
906         if other.capacity() == 0 {
907             return Ok(());
908         }
909 
910         let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
911         if ptr == other.ptr.as_ptr()
912             && self.kind() == KIND_ARC
913             && other.kind() == KIND_ARC
914             && self.data == other.data
915         {
916             // Contiguous blocks, just combine directly
917             self.len += other.len;
918             self.cap += other.cap;
919             Ok(())
920         } else {
921             Err(other)
922         }
923     }
924 
925     #[inline]
926     fn kind(&self) -> usize {
927         self.data as usize & KIND_MASK
928     }
929 
930     unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
931         debug_assert_eq!(self.kind(), KIND_VEC);
932         debug_assert!(ref_cnt == 1 || ref_cnt == 2);
933 
934         let original_capacity_repr =
935             (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
936 
937         // The vec offset cannot be concurrently mutated, so there
938         // should be no danger reading it.
939         let off = (self.data as usize) >> VEC_POS_OFFSET;
940 
941         // First, allocate a new `Shared` instance containing the
942         // `Vec` fields. It's important to note that `ptr`, `len`,
943         // and `cap` cannot be mutated without having `&mut self`.
944         // This means that these fields will not be concurrently
945         // updated and since the buffer hasn't been promoted to an
946         // `Arc`, those three fields still are the components of the
947         // vector.
948         let shared = Box::new(Shared {
949             vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
950             original_capacity_repr,
951             ref_count: AtomicUsize::new(ref_cnt),
952         });
953 
954         let shared = Box::into_raw(shared);
955 
956         // The pointer should be aligned, so this assert should
957         // always succeed.
958         debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
959 
960         self.data = shared;
961     }
962 
963     /// Makes an exact shallow clone of `self`.
964     ///
965     /// The kind of `self` doesn't matter, but this is unsafe
966     /// because the clone will have the same offsets. You must
967     /// make sure the value returned to the user does not allow
968     /// two views into the same range.
969     #[inline]
970     unsafe fn shallow_clone(&mut self) -> BytesMut {
971         if self.kind() == KIND_ARC {
972             increment_shared(self.data);
973             ptr::read(self)
974         } else {
975             self.promote_to_shared(/*ref_count = */ 2);
976             ptr::read(self)
977         }
978     }
979 
980     #[inline]
981     unsafe fn get_vec_pos(&mut self) -> (usize, usize) {
982         debug_assert_eq!(self.kind(), KIND_VEC);
983 
984         let prev = self.data as usize;
985         (prev >> VEC_POS_OFFSET, prev)
986     }
987 
988     #[inline]
989     unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) {
990         debug_assert_eq!(self.kind(), KIND_VEC);
991         debug_assert!(pos <= MAX_VEC_POS);
992 
993         self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
994     }
995 
996     /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
997     ///
998     /// The returned slice can be used to fill the buffer with data (e.g. by
999     /// reading from a file) before marking the data as initialized using the
1000     /// [`set_len`] method.
1001     ///
1002     /// [`set_len`]: BytesMut::set_len
1003     ///
1004     /// # Examples
1005     ///
1006     /// ```
1007     /// use bytes::BytesMut;
1008     ///
1009     /// // Allocate buffer big enough for 10 bytes.
1010     /// let mut buf = BytesMut::with_capacity(10);
1011     ///
1012     /// // Fill in the first 3 elements.
1013     /// let uninit = buf.spare_capacity_mut();
1014     /// uninit[0].write(0);
1015     /// uninit[1].write(1);
1016     /// uninit[2].write(2);
1017     ///
1018     /// // Mark the first 3 bytes of the buffer as being initialized.
1019     /// unsafe {
1020     ///     buf.set_len(3);
1021     /// }
1022     ///
1023     /// assert_eq!(&buf[..], &[0, 1, 2]);
1024     /// ```
1025     #[inline]
1026     pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
1027         unsafe {
1028             let ptr = self.ptr.as_ptr().add(self.len);
1029             let len = self.cap - self.len;
1030 
1031             slice::from_raw_parts_mut(ptr.cast(), len)
1032         }
1033     }
1034 }
1035 
1036 impl Drop for BytesMut {
1037     fn drop(&mut self) {
1038         let kind = self.kind();
1039 
1040         if kind == KIND_VEC {
1041             unsafe {
1042                 let (off, _) = self.get_vec_pos();
1043 
1044                 // Vector storage, free the vector
1045                 let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
1046             }
1047         } else if kind == KIND_ARC {
1048             unsafe { release_shared(self.data) };
1049         }
1050     }
1051 }
1052 
1053 impl Buf for BytesMut {
1054     #[inline]
1055     fn remaining(&self) -> usize {
1056         self.len()
1057     }
1058 
1059     #[inline]
1060     fn chunk(&self) -> &[u8] {
1061         self.as_slice()
1062     }
1063 
1064     #[inline]
1065     fn advance(&mut self, cnt: usize) {
1066         assert!(
1067             cnt <= self.remaining(),
1068             "cannot advance past `remaining`: {:?} <= {:?}",
1069             cnt,
1070             self.remaining(),
1071         );
1072         unsafe {
1073             self.set_start(cnt);
1074         }
1075     }
1076 
1077     fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
1078         self.split_to(len).freeze()
1079     }
1080 }
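// A minimal usage sketch of the `Buf` impl above (illustrative only): reading
// consumes bytes from the front of the buffer, and `copy_to_bytes` is
// zero-copy because it is implemented as `split_to(len).freeze()`.
//
//     use bytes::{Buf, BytesMut};
//
//     let mut buf = BytesMut::from(&b"hello world"[..]);
//     assert_eq!(buf.get_u8(), b'h');        // consumes the first byte
//     let ello = buf.copy_to_bytes(4);       // takes "ello" without copying
//     assert_eq!(&ello[..], b"ello");
//     assert_eq!(&buf[..], b" world");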
1081 
1082 unsafe impl BufMut for BytesMut {
1083     #[inline]
1084     fn remaining_mut(&self) -> usize {
1085         usize::MAX - self.len()
1086     }
1087 
1088     #[inline]
1089     unsafe fn advance_mut(&mut self, cnt: usize) {
1090         let new_len = self.len() + cnt;
1091         assert!(
1092             new_len <= self.cap,
1093             "new_len = {}; capacity = {}",
1094             new_len,
1095             self.cap
1096         );
1097         self.len = new_len;
1098     }
1099 
1100     #[inline]
1101     fn chunk_mut(&mut self) -> &mut UninitSlice {
1102         if self.capacity() == self.len() {
1103             self.reserve(64);
1104         }
1105         self.spare_capacity_mut().into()
1106     }
1107 
1108     // Specialize these methods so they can skip checking `remaining_mut`
1109     // and `advance_mut`.
1110 
1111     fn put<T: crate::Buf>(&mut self, mut src: T)
1112     where
1113         Self: Sized,
1114     {
1115         while src.has_remaining() {
1116             let s = src.chunk();
1117             let l = s.len();
1118             self.extend_from_slice(s);
1119             src.advance(l);
1120         }
1121     }
1122 
1123     fn put_slice(&mut self, src: &[u8]) {
1124         self.extend_from_slice(src);
1125     }
1126 
1127     fn put_bytes(&mut self, val: u8, cnt: usize) {
1128         self.reserve(cnt);
1129         unsafe {
1130             let dst = self.spare_capacity_mut();
1131             // Reserved above
1132             debug_assert!(dst.len() >= cnt);
1133 
1134             ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
1135 
1136             self.advance_mut(cnt);
1137         }
1138     }
1139 }
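// A minimal usage sketch of the `BufMut` impl above (illustrative only): all
// writes reserve space as needed, so the buffer grows implicitly.
//
//     use bytes::{BufMut, BytesMut};
//
//     let mut buf = BytesMut::new();         // length 0, no allocation yet
//     buf.put_u16(0x0102);                   // big-endian by default
//     buf.put_bytes(0xff, 3);
//     assert_eq!(&buf[..], &[0x01, 0x02, 0xff, 0xff, 0xff]);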
1140 
1141 impl AsRef<[u8]> for BytesMut {
1142     #[inline]
1143     fn as_ref(&self) -> &[u8] {
1144         self.as_slice()
1145     }
1146 }
1147 
1148 impl Deref for BytesMut {
1149     type Target = [u8];
1150 
1151     #[inline]
1152     fn deref(&self) -> &[u8] {
1153         self.as_ref()
1154     }
1155 }
1156 
1157 impl AsMut<[u8]> for BytesMut {
1158     #[inline]
1159     fn as_mut(&mut self) -> &mut [u8] {
1160         self.as_slice_mut()
1161     }
1162 }
1163 
1164 impl DerefMut for BytesMut {
1165     #[inline]
1166     fn deref_mut(&mut self) -> &mut [u8] {
1167         self.as_mut()
1168     }
1169 }
1170 
1171 impl<'a> From<&'a [u8]> for BytesMut {
1172     fn from(src: &'a [u8]) -> BytesMut {
1173         BytesMut::from_vec(src.to_vec())
1174     }
1175 }
1176 
1177 impl<'a> From<&'a str> for BytesMut {
1178     fn from(src: &'a str) -> BytesMut {
1179         BytesMut::from(src.as_bytes())
1180     }
1181 }
1182 
1183 impl From<BytesMut> for Bytes {
1184     fn from(src: BytesMut) -> Bytes {
1185         src.freeze()
1186     }
1187 }
1188 
1189 impl PartialEq for BytesMut {
1190     fn eq(&self, other: &BytesMut) -> bool {
1191         self.as_slice() == other.as_slice()
1192     }
1193 }
1194 
1195 impl PartialOrd for BytesMut {
1196     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1197         self.as_slice().partial_cmp(other.as_slice())
1198     }
1199 }
1200 
1201 impl Ord for BytesMut {
1202     fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
1203         self.as_slice().cmp(other.as_slice())
1204     }
1205 }
1206 
1207 impl Eq for BytesMut {}
1208 
1209 impl Default for BytesMut {
1210     #[inline]
1211     fn default() -> BytesMut {
1212         BytesMut::new()
1213     }
1214 }
1215 
1216 impl hash::Hash for BytesMut {
1217     fn hash<H>(&self, state: &mut H)
1218     where
1219         H: hash::Hasher,
1220     {
1221         let s: &[u8] = self.as_ref();
1222         s.hash(state);
1223     }
1224 }
1225 
1226 impl Borrow<[u8]> for BytesMut {
1227     fn borrow(&self) -> &[u8] {
1228         self.as_ref()
1229     }
1230 }
1231 
1232 impl BorrowMut<[u8]> for BytesMut {
1233     fn borrow_mut(&mut self) -> &mut [u8] {
1234         self.as_mut()
1235     }
1236 }
1237 
1238 impl fmt::Write for BytesMut {
1239     #[inline]
1240     fn write_str(&mut self, s: &str) -> fmt::Result {
1241         if self.remaining_mut() >= s.len() {
1242             self.put_slice(s.as_bytes());
1243             Ok(())
1244         } else {
1245             Err(fmt::Error)
1246         }
1247     }
1248 
1249     #[inline]
1250     fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1251         fmt::write(self, args)
1252     }
1253 }
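// Because `BytesMut` implements `core::fmt::Write`, formatted text can be
// written straight into the buffer. A minimal sketch (illustrative only):
//
//     use bytes::BytesMut;
//     use std::fmt::Write;
//
//     let mut buf = BytesMut::new();
//     write!(buf, "id={} ok={}", 7, true).unwrap();
//     assert_eq!(&buf[..], b"id=7 ok=true");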
1254 
1255 impl Clone for BytesMut {
1256     fn clone(&self) -> BytesMut {
1257         BytesMut::from(&self[..])
1258     }
1259 }
1260 
1261 impl IntoIterator for BytesMut {
1262     type Item = u8;
1263     type IntoIter = IntoIter<BytesMut>;
1264 
1265     fn into_iter(self) -> Self::IntoIter {
1266         IntoIter::new(self)
1267     }
1268 }
1269 
1270 impl<'a> IntoIterator for &'a BytesMut {
1271     type Item = &'a u8;
1272     type IntoIter = core::slice::Iter<'a, u8>;
1273 
1274     fn into_iter(self) -> Self::IntoIter {
1275         self.as_ref().iter()
1276     }
1277 }
1278 
1279 impl Extend<u8> for BytesMut {
1280     fn extend<T>(&mut self, iter: T)
1281     where
1282         T: IntoIterator<Item = u8>,
1283     {
1284         let iter = iter.into_iter();
1285 
1286         let (lower, _) = iter.size_hint();
1287         self.reserve(lower);
1288 
1289         // TODO: optimize
1290         // 1. If self.kind() == KIND_VEC, use Vec::extend
1291         // 2. Make `reserve` inline-able
1292         for b in iter {
1293             self.reserve(1);
1294             self.put_u8(b);
1295         }
1296     }
1297 }
1298 
1299 impl<'a> Extend<&'a u8> for BytesMut {
1300     fn extend<T>(&mut self, iter: T)
1301     where
1302         T: IntoIterator<Item = &'a u8>,
1303     {
1304         self.extend(iter.into_iter().copied())
1305     }
1306 }
1307 
1308 impl Extend<Bytes> for BytesMut {
1309     fn extend<T>(&mut self, iter: T)
1310     where
1311         T: IntoIterator<Item = Bytes>,
1312     {
1313         for bytes in iter {
1314             self.extend_from_slice(&bytes)
1315         }
1316     }
1317 }
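// A brief usage sketch of the `Extend` impls above (illustrative only):
//
//     use bytes::{Bytes, BytesMut};
//
//     let mut buf = BytesMut::new();
//     buf.extend([b'a', b'b']);                  // Extend<u8>
//     buf.extend(b"cd".iter());                  // Extend<&u8>
//     buf.extend([Bytes::from_static(b"ef")]);   // Extend<Bytes>
//     assert_eq!(&buf[..], b"abcdef");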
1318 
1319 impl FromIterator<u8> for BytesMut {
1320     fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1321         BytesMut::from_vec(Vec::from_iter(into_iter))
1322     }
1323 }
1324 
1325 impl<'a> FromIterator<&'a u8> for BytesMut {
1326     fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1327         BytesMut::from_iter(into_iter.into_iter().copied())
1328     }
1329 }
1330 
1331 /*
1332  *
1333  * ===== Inner =====
1334  *
1335  */
1336 
1337 unsafe fn increment_shared(ptr: *mut Shared) {
1338     let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
1339 
1340     if old_size > isize::MAX as usize {
1341         crate::abort();
1342     }
1343 }
1344 
1345 unsafe fn release_shared(ptr: *mut Shared) {
1346     // `Shared` storage... follow the drop steps from Arc.
1347     if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
1348         return;
1349     }
1350 
1351     // This fence is needed to prevent reordering of use of the data and
1352     // deletion of the data.  Because it is marked `Release`, the decreasing
1353     // of the reference count synchronizes with this `Acquire` fence. This
1354     // means that use of the data happens before decreasing the reference
1355     // count, which happens before this fence, which happens before the
1356     // deletion of the data.
1357     //
1358     // As explained in the [Boost documentation][1],
1359     //
1360     // > It is important to enforce any possible access to the object in one
1361     // > thread (through an existing reference) to *happen before* deleting
1362     // > the object in a different thread. This is achieved by a "release"
1363     // > operation after dropping a reference (any access to the object
1364     // > through this reference must obviously happened before), and an
1365     // > "acquire" operation before deleting the object.
1366     //
1367     // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1368     //
1369     // Thread sanitizer does not support atomic fences. Use an atomic load
1370     // instead.
1371     (*ptr).ref_count.load(Ordering::Acquire);
1372 
1373     // Drop the data
1374     drop(Box::from_raw(ptr));
1375 }
1376 
1377 impl Shared {
1378     fn is_unique(&self) -> bool {
1379         // The goal is to check if the current handle is the only handle
1380         // that currently has access to the buffer. This is done by
1381         // checking if the `ref_count` is currently 1.
1382         //
1383         // The `Acquire` ordering synchronizes with the `Release` as
1384         // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
1385         // operation guarantees that any mutations done in other threads
1386         // are ordered before the `ref_count` is decremented. As such,
1387         // this `Acquire` will guarantee that those mutations are
1388         // visible to the current thread.
1389         self.ref_count.load(Ordering::Acquire) == 1
1390     }
1391 }
1392 
1393 #[inline]
1394 fn original_capacity_to_repr(cap: usize) -> usize {
1395     let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
1396     cmp::min(
1397         width,
1398         MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
1399     )
1400 }
1401 
1402 fn original_capacity_from_repr(repr: usize) -> usize {
1403     if repr == 0 {
1404         return 0;
1405     }
1406 
1407     1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
1408 }
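// Worked example of the round-trip above (illustrative, assuming a 64-bit
// target where PTR_WIDTH == 64): for an original capacity of 4096 bytes,
//
//     original_capacity_to_repr(4096)
//         = min(64 - (4096 >> 10).leading_zeros(), 17 - 10)   // 4096 >> 10 == 4
//         = min(64 - 61, 7)
//         = 3
//     original_capacity_from_repr(3) = 1 << (3 + 9) = 4096
//
// Capacities of 64 KiB and above all clamp to repr 7, i.e. 65536.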
1409 
1410 /*
1411 #[test]
1412 fn test_original_capacity_to_repr() {
1413     assert_eq!(original_capacity_to_repr(0), 0);
1414 
1415     let max_width = 32;
1416 
1417     for width in 1..(max_width + 1) {
1418         let cap = 1 << width - 1;
1419 
1420         let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
1421             0
1422         } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
1423             width - MIN_ORIGINAL_CAPACITY_WIDTH
1424         } else {
1425             MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
1426         };
1427 
1428         assert_eq!(original_capacity_to_repr(cap), expected);
1429 
1430         if width > 1 {
1431             assert_eq!(original_capacity_to_repr(cap + 1), expected);
1432         }
1433 
1434         //  MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
1435         if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
1436             assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
1437             assert_eq!(original_capacity_to_repr(cap + 76), expected);
1438         } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
1439             assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
1440             assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
1441         }
1442     }
1443 }
1444 
1445 #[test]
1446 fn test_original_capacity_from_repr() {
1447     assert_eq!(0, original_capacity_from_repr(0));
1448 
1449     let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
1450 
1451     assert_eq!(min_cap, original_capacity_from_repr(1));
1452     assert_eq!(min_cap * 2, original_capacity_from_repr(2));
1453     assert_eq!(min_cap * 4, original_capacity_from_repr(3));
1454     assert_eq!(min_cap * 8, original_capacity_from_repr(4));
1455     assert_eq!(min_cap * 16, original_capacity_from_repr(5));
1456     assert_eq!(min_cap * 32, original_capacity_from_repr(6));
1457     assert_eq!(min_cap * 64, original_capacity_from_repr(7));
1458 }
1459 */
1460 
1461 unsafe impl Send for BytesMut {}
1462 unsafe impl Sync for BytesMut {}
1463 
1464 /*
1465  *
1466  * ===== PartialEq / PartialOrd =====
1467  *
1468  */
1469 
1470 impl PartialEq<[u8]> for BytesMut {
1471     fn eq(&self, other: &[u8]) -> bool {
1472         &**self == other
1473     }
1474 }
1475 
1476 impl PartialOrd<[u8]> for BytesMut {
1477     fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
1478         (**self).partial_cmp(other)
1479     }
1480 }
1481 
1482 impl PartialEq<BytesMut> for [u8] {
1483     fn eq(&self, other: &BytesMut) -> bool {
1484         *other == *self
1485     }
1486 }
1487 
1488 impl PartialOrd<BytesMut> for [u8] {
1489     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1490         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1491     }
1492 }
1493 
1494 impl PartialEq<str> for BytesMut {
1495     fn eq(&self, other: &str) -> bool {
1496         &**self == other.as_bytes()
1497     }
1498 }
1499 
1500 impl PartialOrd<str> for BytesMut {
1501     fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
1502         (**self).partial_cmp(other.as_bytes())
1503     }
1504 }
1505 
1506 impl PartialEq<BytesMut> for str {
1507     fn eq(&self, other: &BytesMut) -> bool {
1508         *other == *self
1509     }
1510 }
1511 
1512 impl PartialOrd<BytesMut> for str {
1513     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1514         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1515     }
1516 }
1517 
1518 impl PartialEq<Vec<u8>> for BytesMut {
1519     fn eq(&self, other: &Vec<u8>) -> bool {
1520         *self == other[..]
1521     }
1522 }
1523 
1524 impl PartialOrd<Vec<u8>> for BytesMut {
1525     fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
1526         (**self).partial_cmp(&other[..])
1527     }
1528 }
1529 
1530 impl PartialEq<BytesMut> for Vec<u8> {
1531     fn eq(&self, other: &BytesMut) -> bool {
1532         *other == *self
1533     }
1534 }
1535 
1536 impl PartialOrd<BytesMut> for Vec<u8> {
1537     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1538         other.partial_cmp(self)
1539     }
1540 }
1541 
1542 impl PartialEq<String> for BytesMut {
1543     fn eq(&self, other: &String) -> bool {
1544         *self == other[..]
1545     }
1546 }
1547 
1548 impl PartialOrd<String> for BytesMut {
1549     fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
1550         (**self).partial_cmp(other.as_bytes())
1551     }
1552 }
1553 
1554 impl PartialEq<BytesMut> for String {
1555     fn eq(&self, other: &BytesMut) -> bool {
1556         *other == *self
1557     }
1558 }
1559 
1560 impl PartialOrd<BytesMut> for String {
1561     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1562         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1563     }
1564 }
1565 
1566 impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
1567 where
1568     BytesMut: PartialEq<T>,
1569 {
eq(&self, other: &&'a T) -> bool1570     fn eq(&self, other: &&'a T) -> bool {
1571         *self == **other
1572     }
1573 }
1574 
1575 impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
1576 where
1577     BytesMut: PartialOrd<T>,
1578 {
partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering>1579     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
1580         self.partial_cmp(*other)
1581     }
1582 }
1583 
1584 impl PartialEq<BytesMut> for &[u8] {
eq(&self, other: &BytesMut) -> bool1585     fn eq(&self, other: &BytesMut) -> bool {
1586         *other == *self
1587     }
1588 }
1589 
1590 impl PartialOrd<BytesMut> for &[u8] {
partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering>1591     fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1592         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1593     }
1594 }
1595 
1596 impl PartialEq<BytesMut> for &str {
eq(&self, other: &BytesMut) -> bool1597     fn eq(&self, other: &BytesMut) -> bool {
1598         *other == *self
1599     }
1600 }
1601 
impl PartialOrd<BytesMut> for &str {
    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
        // Compare `self` against `other`, not the other way around, so the
        // ordering is consistent with `PartialOrd<str> for BytesMut`.
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<BytesMut> for Bytes {
    fn eq(&self, other: &BytesMut) -> bool {
        other[..] == self[..]
    }
}

impl PartialEq<Bytes> for BytesMut {
    fn eq(&self, other: &Bytes) -> bool {
        other[..] == self[..]
    }
}

impl From<BytesMut> for Vec<u8> {
    fn from(mut bytes: BytesMut) -> Self {
        let kind = bytes.kind();

        let mut vec = if kind == KIND_VEC {
            // Vec-backed and uniquely owned: rebuild the original `Vec`
            // allocation directly from its raw parts.
            unsafe {
                let (off, _) = bytes.get_vec_pos();
                rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
            }
        } else if kind == KIND_ARC {
            let shared = bytes.data as *mut Shared;

            if unsafe { (*shared).is_unique() } {
                // Sole owner of the shared state: steal its `Vec` instead of
                // copying, then release the now-empty `Shared`.
                let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());

                unsafe { release_shared(shared) };

                vec
            } else {
                // Other handles still reference the buffer, so copying is the
                // only option.
                return bytes.deref().to_vec();
            }
        } else {
            return bytes.deref().to_vec();
        };

        let len = bytes.len;

        unsafe {
            // The view may not start at the allocation's first byte; shift the
            // visible bytes down to the front and fix up the length.
            ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
            vec.set_len(len);
        }

        // `vec` now owns the allocation; skip `BytesMut`'s destructor.
        mem::forget(bytes);

        vec
    }
}
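
// Sketch of the conversion above (illustrative only, not a doctest): when the
// `BytesMut` is the sole owner of its storage, the allocation is handed over
// without copying the data into a new buffer; otherwise the visible bytes are
// copied into a fresh `Vec`.
//
//     let mut buf = BytesMut::with_capacity(64);
//     buf.extend_from_slice(b"hello");
//     let v: Vec<u8> = buf.into();
//     assert_eq!(v, b"hello");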

#[inline]
fn vptr(ptr: *mut u8) -> NonNull<u8> {
    if cfg!(debug_assertions) {
        NonNull::new(ptr).expect("Vec pointer should be non-null")
    } else {
        unsafe { NonNull::new_unchecked(ptr) }
    }
}

/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields.
///
/// It is equivalent to `addr as *mut T`, but that cast fails under Miri when
/// strict provenance checking is enabled, so the pointer is instead derived
/// from a null pointer via `wrapping_add`.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}
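
// Illustrative sketch (not part of the crate's API): a small tag such as
// `KIND_VEC` can be stored in a pointer-typed field via `invalid_ptr` and
// recovered later, without the integer-to-pointer cast that Miri's strict
// provenance checking rejects.
//
//     let tagged: *mut Shared = invalid_ptr(KIND_VEC);
//     debug_assert_eq!(tagged as usize & KIND_MASK, KIND_VEC);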

/// Precondition: dst >= original
///
/// The following line is equivalent to:
///
/// ```rust,ignore
/// self.ptr.as_ptr().offset_from(ptr) as usize;
/// ```
///
/// But because the minimum supported Rust version is 1.39 and `offset_from`
/// was only stabilized in 1.47, we cannot use it.
#[inline]
fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
    debug_assert!(dst >= original);

    dst as usize - original as usize
}

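// Rebuilds the backing `Vec` from a view that starts `off` bytes past the
// front of the original allocation: walk the pointer back by `off` and grow
// `len`/`cap` by the same amount so that the raw parts once again describe
// the whole allocation.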
unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
    let ptr = ptr.offset(-(off as isize));
    len += off;
    cap += off;

    Vec::from_raw_parts(ptr, len, cap)
}

// ===== impl SharedVtable =====
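//
// The functions below implement the `Vtable` used by `Bytes` handles that
// share a `Shared` allocation (typically a `BytesMut` whose storage was
// promoted to `Shared` and then frozen): `clone` bumps the reference count,
// `to_vec` reuses the `Vec` when the handle is the sole owner and copies
// otherwise, and `drop` releases one reference.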

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_v_clone,
    to_vec: shared_v_to_vec,
    drop: shared_v_drop,
};

unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed) as *mut Shared;
    increment_shared(shared);

    let data = AtomicPtr::new(shared as *mut ());
    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}

unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    let shared: *mut Shared = data.load(Ordering::Relaxed).cast();

    if (*shared).is_unique() {
        let shared = &mut *shared;

        // Drop shared
        let mut vec = mem::replace(&mut shared.vec, Vec::new());
        release_shared(shared);

        // Copy back buffer
        ptr::copy(ptr, vec.as_mut_ptr(), len);
        vec.set_len(len);

        vec
    } else {
        let v = slice::from_raw_parts(ptr, len).to_vec();
        release_shared(shared);
        v
    }
}

unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}

// compile-fails

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

/// ```compile_fail
/// use bytes::BytesMut;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = BytesMut::from("hello world");
///     b1.split();
/// }
/// ```
fn _split_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::BytesMut;
    use crate::Bytes;

    #[test]
    fn bytes_mut_cloning_frozen() {
        loom::model(|| {
            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
            let addr = a.as_ptr() as usize;

            // test that cloning `Bytes` is thread-safe by sharing one
            // instance through an `Arc` and cloning it from two threads
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}