Module bytes::buf

Utilities for working with buffers.
A buffer is any structure that contains a sequence of bytes. The bytes may or may not be stored in contiguous memory. This module contains traits used to abstract over buffers as well as utilities for working with buffer types.

`Buf`, `BufMut`

These are the two foundational traits for abstractly working with buffers. They can be thought of as iterators for byte structures. They offer additional performance over `Iterator` by providing an API optimized for byte slices.

Structs:
- `Chain`: A `Chain` sequences two buffers.
- `IntoIter`: Iterator over the bytes contained by the buffer.
- `Limit`: A `BufMut` adapter which limits the amount of bytes that can be written to an underlying buffer.
- `Reader`: A `Buf` adapter which implements `io::Read` for the inner value.
- `Take`: A `Buf` adapter which limits the bytes read from an underlying buffer.
- `UninitSlice`: Uninitialized byte slice.
- `Writer`: A `BufMut` adapter which implements `io::Write` for the inner value.

Traits:
- `Buf`: Read bytes from a buffer.
- `BufMut`: A trait for values that provide sequential write access to bytes.
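The two traits are designed to compose. As a quick orientation, a minimal round-trip sketch (not from the original page; it uses only APIs documented below):

use bytes::{Buf, BufMut};

// Write into a growable buffer through the BufMut cursor...
let mut buf = vec![];
buf.put_u16(0x0102);
buf.put_slice(b"ok");

// ...then read the same bytes back through the Buf cursor.
let mut read = &buf[..];
assert_eq!(0x0102, read.get_u16());
assert_eq!(b'o', read.get_u8());
assert_eq!(1, read.remaining());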
Struct bytes::buf::Chain

pub struct Chain<T, U> { /* private fields */ }
A `Chain` sequences two buffers.

`Chain` is an adapter that links two underlying buffers and provides a continuous view across both buffers. It is able to sequence either immutable buffers (`Buf` values) or mutable buffers (`BufMut` values).

This struct is generally created by calling `Buf::chain`. Please see that function’s documentation for more detail.
use bytes::{Bytes, Buf};

let mut buf = (&b"hello "[..])
    .chain(&b"world"[..]);

let full: Bytes = buf.copy_to_bytes(11);
assert_eq!(full[..], b"hello world"[..]);
first_ref: Gets a reference to the first underlying `Buf`.
use bytes::Buf;

let buf = (&b"hello"[..])
    .chain(&b"world"[..]);

assert_eq!(buf.first_ref()[..], b"hello"[..]);
first_mut: Gets a mutable reference to the first underlying `Buf`.
use bytes::Buf;

let mut buf = (&b"hello"[..])
    .chain(&b"world"[..]);

buf.first_mut().advance(1);

let full = buf.copy_to_bytes(9);
assert_eq!(full, b"elloworld"[..]);
last_ref: Gets a reference to the last underlying `Buf`.
use bytes::Buf;

let buf = (&b"hello"[..])
    .chain(&b"world"[..]);

assert_eq!(buf.last_ref()[..], b"world"[..]);
last_mut: Gets a mutable reference to the last underlying `Buf`.
use bytes::Buf;

let mut buf = (&b"hello "[..])
    .chain(&b"world"[..]);

buf.last_mut().advance(1);

let full = buf.copy_to_bytes(10);
assert_eq!(full, b"hello orld"[..]);
into_inner: Consumes this `Chain`, returning the underlying values.
use bytes::Buf;

let chain = (&b"hello"[..])
    .chain(&b"world"[..]);

let (first, last) = chain.into_inner();
assert_eq!(first[..], b"hello"[..]);
assert_eq!(last[..], b"world"[..]);
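The examples above only chain immutable buffers; a minimal sketch of the mutable side, assuming the `BufMut::chain_mut` adaptor listed on the `BufMut` page below:

use bytes::BufMut;

let mut head = [0u8; 3];
let mut tail = [0u8; 3];

// Writes fill the first buffer, then continue into the second.
let mut chain = (&mut head[..]).chain_mut(&mut tail[..]);
chain.put_slice(b"abcdef");

assert_eq!(&head, b"abc");
assert_eq!(&tail, b"def");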
[Auto-generated listing of the `Buf` and `BufMut` trait methods implemented by `Chain` omitted.]

Struct bytes::buf::IntoIter

pub struct IntoIter<T> { /* private fields */ }
Iterator over the bytes contained by the buffer.

Basic usage:

use bytes::Bytes;

let buf = Bytes::from(&b"abc"[..]);
let mut iter = buf.into_iter();

assert_eq!(iter.next(), Some(b'a'));
assert_eq!(iter.next(), Some(b'b'));
assert_eq!(iter.next(), Some(b'c'));
assert_eq!(iter.next(), None);
Creates an iterator over the bytes contained by the buffer.

use bytes::Bytes;

let buf = Bytes::from_static(b"abc");
let mut iter = buf.into_iter();

assert_eq!(iter.next(), Some(b'a'));
assert_eq!(iter.next(), Some(b'b'));
assert_eq!(iter.next(), Some(b'c'));
assert_eq!(iter.next(), None);
into_inner: Consumes this `IntoIter`, returning the underlying value.

use bytes::{Buf, Bytes};

let buf = Bytes::from(&b"abc"[..]);
let mut iter = buf.into_iter();

assert_eq!(iter.next(), Some(b'a'));

let buf = iter.into_inner();
assert_eq!(2, buf.remaining());
get_ref: Gets a reference to the underlying `Buf`.

It is inadvisable to directly read from the underlying `Buf`.

use bytes::{Buf, Bytes};

let buf = Bytes::from(&b"abc"[..]);
let mut iter = buf.into_iter();

assert_eq!(iter.next(), Some(b'a'));

assert_eq!(2, iter.get_ref().remaining());
get_mut: Gets a mutable reference to the underlying `Buf`.

It is inadvisable to directly read from the underlying `Buf`.

use bytes::{Buf, BytesMut};

let buf = BytesMut::from(&b"abc"[..]);
let mut iter = buf.into_iter();

assert_eq!(iter.next(), Some(b'a'));

iter.get_mut().advance(1);

assert_eq!(iter.next(), Some(b'c'));
[Auto-generated listing of the `Iterator` trait methods implemented by `IntoIter` omitted.]

Struct bytes::buf::Limit

pub struct Limit<T> { /* private fields */ }
A `BufMut` adapter which limits the amount of bytes that can be written to an underlying buffer.

into_inner: Consumes this `Limit`, returning the underlying value.

get_ref: Gets a reference to the underlying `BufMut`.

It is inadvisable to directly write to the underlying `BufMut`.

get_mut: Gets a mutable reference to the underlying `BufMut`.

It is inadvisable to directly write to the underlying `BufMut`.
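No examples survive on this page; a minimal sketch of how these methods compose, built on the `BufMut::limit` adaptor documented below:

use bytes::BufMut;

// Cap writes into a Vec at 5 bytes.
let mut buf = vec![].limit(5);
assert_eq!(5, buf.remaining_mut());
assert_eq!(0, buf.get_ref().len());

buf.put_slice(b"hello");
assert!(!buf.has_remaining_mut());

// Recover the underlying Vec once the limit is reached.
let vec = buf.into_inner();
assert_eq!(vec, b"hello");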
[Auto-generated listing of the `BufMut` trait methods implemented by `Limit` omitted.]

Struct bytes::buf::Reader

pub struct Reader<B> { /* private fields */ }
A `Buf` adapter which implements `io::Read` for the inner value.

This struct is generally created by calling `reader()` on `Buf`. See documentation of `reader()` for more details.
get_ref: Gets a reference to the underlying `Buf`.

It is inadvisable to directly read from the underlying `Buf`.

use bytes::Buf;

let buf = b"hello world".reader();

assert_eq!(b"hello world", buf.get_ref());
get_mut: Gets a mutable reference to the underlying `Buf`.

It is inadvisable to directly read from the underlying `Buf`.
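A short sketch of `get_mut` in use (hypothetical, mirroring the `get_ref` example above):

use bytes::Buf;

let mut buf = b"hello world".reader();

// Advance the inner Buf directly, skipping "hello ".
buf.get_mut().advance(6);

assert_eq!(b"world", buf.get_ref());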
into_inner: Consumes this `Reader`, returning the underlying value.

use bytes::Buf;
use std::io;

let mut buf = b"hello world".reader();
let mut dst = vec![];

io::copy(&mut buf, &mut dst).unwrap();

let buf = buf.into_inner();
assert_eq!(0, buf.remaining());
[Auto-generated listing of the `Read` and `BufRead` trait methods implemented by `Reader` omitted.]

Struct bytes::buf::Take

pub struct Take<T> { /* private fields */ }
A `Buf` adapter which limits the bytes read from an underlying buffer.

This struct is generally created by calling `take()` on `Buf`. See documentation of `take()` for more details.
into_inner: Consumes this `Take`, returning the underlying value.

use bytes::{Buf, BufMut};

let mut buf = b"hello world".take(2);
let mut dst = vec![];

dst.put(&mut buf);
assert_eq!(*dst, b"he"[..]);

let mut buf = buf.into_inner();

dst.clear();
dst.put(&mut buf);
assert_eq!(*dst, b"llo world"[..]);
get_ref: Gets a reference to the underlying `Buf`.

It is inadvisable to directly read from the underlying `Buf`.

use bytes::Buf;

let buf = b"hello world".take(2);

assert_eq!(11, buf.get_ref().remaining());
get_mut: Gets a mutable reference to the underlying `Buf`.

It is inadvisable to directly read from the underlying `Buf`.

use bytes::{Buf, BufMut};

let mut buf = b"hello world".take(2);
let mut dst = vec![];

buf.get_mut().advance(2);

dst.put(&mut buf);
assert_eq!(*dst, b"ll"[..]);
limit: Returns the maximum number of bytes that can be read.

If the inner `Buf` has fewer bytes than indicated by this method then that is the actual number of available bytes.

use bytes::Buf;

let mut buf = b"hello world".take(2);

assert_eq!(2, buf.limit());
assert_eq!(b'h', buf.get_u8());
assert_eq!(1, buf.limit());
set_limit: Sets the maximum number of bytes that can be read.

If the inner `Buf` has fewer bytes than `lim` then that is the actual number of available bytes.

use bytes::{Buf, BufMut};

let mut buf = b"hello world".take(2);
let mut dst = vec![];

dst.put(&mut buf);
assert_eq!(*dst, b"he"[..]);

dst.clear();

buf.set_limit(3);
dst.put(&mut buf);
assert_eq!(*dst, b"llo"[..]);
[Auto-generated listing of the `Buf` trait methods implemented by `Take` omitted.]

Struct bytes::buf::UninitSlice

#[repr(transparent)]
pub struct UninitSlice(_);
Uninitialized byte slice.

Returned by `BufMut::chunk_mut()`, the referenced byte slice may be uninitialized. The wrapper provides safe access without introducing undefined behavior.

The safety invariants of this wrapper are:

1. Reading from an `UninitSlice` is undefined behavior.
2. Writing uninitialized bytes to an `UninitSlice` is undefined behavior.

The difference between `&mut UninitSlice` and `&mut [MaybeUninit<u8>]` is that it is possible in safe code to write uninitialized bytes to an `&mut [MaybeUninit<u8>]`, which this type prohibits.
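To make the contrast concrete, a sketch (not part of the original docs) using the constructors documented below:

use bytes::buf::UninitSlice;
use core::mem::MaybeUninit;

let mut storage = [MaybeUninit::<u8>::uninit(); 4];

// Safe code can store an uninitialized value back into a
// `&mut [MaybeUninit<u8>]`; nothing prevents this.
storage[0] = MaybeUninit::uninit();

// The UninitSlice view only accepts initialized bytes.
let slice = UninitSlice::uninit(&mut storage[..]);
slice.write_byte(0, b'a');
slice.copy_from_slice(b"abcd");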
new: Creates a `&mut UninitSlice` wrapping a slice of initialised memory.

use bytes::buf::UninitSlice;

let mut buffer = [0u8; 64];
let slice = UninitSlice::new(&mut buffer[..]);
uninit: Creates a `&mut UninitSlice` wrapping a slice of uninitialised memory.

use bytes::buf::UninitSlice;
use core::mem::MaybeUninit;

let mut buffer = [MaybeUninit::uninit(); 64];
let slice = UninitSlice::uninit(&mut buffer[..]);

let mut vec = Vec::with_capacity(1024);
let spare: &mut UninitSlice = vec.spare_capacity_mut().into();
from_raw_parts_mut: Create a `&mut UninitSlice` from a pointer and a length.

Safety: The caller must ensure that `ptr` references a valid memory region owned by the caller representing a byte slice for the duration of `'a`.

use bytes::buf::UninitSlice;

let bytes = b"hello world".to_vec();
let ptr = bytes.as_ptr() as *mut _;
let len = bytes.len();

let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) };
write_byte: Write a single byte at the specified offset.

Panics: The function panics if `index` is out of bounds.

use bytes::buf::UninitSlice;

let mut data = [b'f', b'o', b'o'];
let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };

slice.write_byte(0, b'b');

assert_eq!(b"boo", &data[..]);
copy_from_slice: Copies bytes from `src` into `self`.

The length of `src` must be the same as `self`.

Panics: The function panics if `src` has a different length than `self`.

use bytes::buf::UninitSlice;

let mut data = [b'f', b'o', b'o'];
let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };

slice.copy_from_slice(b"bar");

assert_eq!(b"bar", &data[..]);
as_mut_ptr: Return a raw pointer to the slice’s buffer.

Safety: The caller must not read from the referenced memory and must not write uninitialized bytes to the slice either.

use bytes::BufMut;

let mut data = [0, 1, 2];
let mut slice = &mut data[..];
let ptr = BufMut::chunk_mut(&mut slice).as_mut_ptr();
as_uninit_slice_mut: Return a `&mut [MaybeUninit<u8>]` to this slice’s buffer.

Safety: The caller must not read from the referenced memory and must not write uninitialized bytes to the slice either. This is because the `BufMut` implementation that created the `UninitSlice` knows which parts are initialized. Writing uninitialized bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined behavior.

use bytes::BufMut;

let mut data = [0, 1, 2];
let mut slice = &mut data[..];
unsafe {
    let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut();
};
[Auto-generated listing of the `Index` and `IndexMut` implementations for `UninitSlice` omitted.]

Struct bytes::buf::Writer

pub struct Writer<B> { /* private fields */ }
A `BufMut` adapter which implements `io::Write` for the inner value.

This struct is generally created by calling `writer()` on `BufMut`. See documentation of `writer()` for more details.
get_ref: Gets a reference to the underlying `BufMut`.

It is inadvisable to directly write to the underlying `BufMut`.

use bytes::BufMut;

let buf = Vec::with_capacity(1024).writer();

assert_eq!(1024, buf.get_ref().capacity());
get_mut: Gets a mutable reference to the underlying `BufMut`.

It is inadvisable to directly write to the underlying `BufMut`.

use bytes::BufMut;

let mut buf = vec![].writer();

buf.get_mut().reserve(1024);

assert_eq!(1024, buf.get_ref().capacity());
into_inner: Consumes this `Writer`, returning the underlying value.

use bytes::BufMut;
use std::io;

let mut buf = vec![].writer();
let mut src = &b"hello world"[..];

io::copy(&mut src, &mut buf).unwrap();

let buf = buf.into_inner();
assert_eq!(*buf, b"hello world"[..]);
[Auto-generated listing of the `Write` trait methods implemented by `Writer` omitted.]

Trait bytes::buf::Buf

pub trait Buf {
    fn remaining(&self) -> usize;
    fn chunk(&self) -> &[u8];
    fn advance(&mut self, cnt: usize);

    fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { ... }
    fn has_remaining(&self) -> bool { ... }
    fn copy_to_slice(&mut self, dst: &mut [u8]) { ... }
    fn get_u8(&mut self) -> u8 { ... }
    fn get_i8(&mut self) -> i8 { ... }
    fn get_u16(&mut self) -> u16 { ... }
    fn get_u16_le(&mut self) -> u16 { ... }
    fn get_u16_ne(&mut self) -> u16 { ... }
    fn get_i16(&mut self) -> i16 { ... }
    fn get_i16_le(&mut self) -> i16 { ... }
    fn get_i16_ne(&mut self) -> i16 { ... }
    fn get_u32(&mut self) -> u32 { ... }
    fn get_u32_le(&mut self) -> u32 { ... }
    fn get_u32_ne(&mut self) -> u32 { ... }
    fn get_i32(&mut self) -> i32 { ... }
    fn get_i32_le(&mut self) -> i32 { ... }
    fn get_i32_ne(&mut self) -> i32 { ... }
    fn get_u64(&mut self) -> u64 { ... }
    fn get_u64_le(&mut self) -> u64 { ... }
    fn get_u64_ne(&mut self) -> u64 { ... }
    fn get_i64(&mut self) -> i64 { ... }
    fn get_i64_le(&mut self) -> i64 { ... }
    fn get_i64_ne(&mut self) -> i64 { ... }
    fn get_u128(&mut self) -> u128 { ... }
    fn get_u128_le(&mut self) -> u128 { ... }
    fn get_u128_ne(&mut self) -> u128 { ... }
    fn get_i128(&mut self) -> i128 { ... }
    fn get_i128_le(&mut self) -> i128 { ... }
    fn get_i128_ne(&mut self) -> i128 { ... }
    fn get_uint(&mut self, nbytes: usize) -> u64 { ... }
    fn get_uint_le(&mut self, nbytes: usize) -> u64 { ... }
    fn get_uint_ne(&mut self, nbytes: usize) -> u64 { ... }
    fn get_int(&mut self, nbytes: usize) -> i64 { ... }
    fn get_int_le(&mut self, nbytes: usize) -> i64 { ... }
    fn get_int_ne(&mut self, nbytes: usize) -> i64 { ... }
    fn get_f32(&mut self) -> f32 { ... }
    fn get_f32_le(&mut self) -> f32 { ... }
    fn get_f32_ne(&mut self) -> f32 { ... }
    fn get_f64(&mut self) -> f64 { ... }
    fn get_f64_le(&mut self) -> f64 { ... }
    fn get_f64_ne(&mut self) -> f64 { ... }
    fn copy_to_bytes(&mut self, len: usize) -> Bytes { ... }
    fn take(self, limit: usize) -> Take<Self>
    where
        Self: Sized,
    { ... }
    fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
    where
        Self: Sized,
    { ... }
    fn reader(self) -> Reader<Self>
    where
        Self: Sized,
    { ... }
}
Read bytes from a buffer.

A buffer stores bytes in memory such that read operations are infallible. The underlying storage may or may not be in contiguous memory. A `Buf` value is a cursor into the buffer. Reading from `Buf` advances the cursor position. It can be thought of as an efficient `Iterator` for collections of bytes.

The simplest `Buf` is a `&[u8]`.

use bytes::Buf;

let mut buf = &b"hello world"[..];

assert_eq!(b'h', buf.get_u8());
assert_eq!(b'e', buf.get_u8());
assert_eq!(b'l', buf.get_u8());

let mut rest = [0; 8];
buf.copy_to_slice(&mut rest);

assert_eq!(&rest[..], &b"lo world"[..]);
remaining: Returns the number of bytes between the current position and the end of the buffer.

This value is greater than or equal to the length of the slice returned by `chunk()`.

use bytes::Buf;

let mut buf = &b"hello world"[..];

assert_eq!(buf.remaining(), 11);

buf.get_u8();

assert_eq!(buf.remaining(), 10);

Implementer notes: Implementations of `remaining` should ensure that the return value does not change unless a call is made to `advance` or any other function that is documented to change the `Buf`’s current position.
chunk: Returns a slice starting at the current position and of length between 0 and `Buf::remaining()`. Note that this can return a shorter slice (this allows non-continuous internal representation).

This is a lower level function. Most operations are done with other functions.

use bytes::Buf;

let mut buf = &b"hello world"[..];

assert_eq!(buf.chunk(), &b"hello world"[..]);

buf.advance(6);

assert_eq!(buf.chunk(), &b"world"[..]);

Implementer notes: This function should never panic. Once the end of the buffer is reached, i.e., `Buf::remaining` returns 0, calls to `chunk()` should return an empty slice.
advance: Advance the internal cursor of the `Buf`.

The next call to `chunk()` will return a slice starting `cnt` bytes further into the underlying buffer.

use bytes::Buf;

let mut buf = &b"hello world"[..];

assert_eq!(buf.chunk(), &b"hello world"[..]);

buf.advance(6);

assert_eq!(buf.chunk(), &b"world"[..]);

Panics: This function may panic if `cnt > self.remaining()`.

Implementer notes: It is recommended for implementations of `advance` to panic if `cnt > self.remaining()`. If the implementation does not panic, the call must behave as if `cnt == self.remaining()`.

A call with `cnt == 0` should never panic and be a no-op.
only.Fills dst
with potentially multiple slices starting at self
’s
+current position.
If the Buf
is backed by disjoint slices of bytes, chunk_vectored
enables
+fetching more than one slice at once. dst
is a slice of IoSlice
+references, enabling the slice to be directly used with writev
+without any further conversion. The sum of the lengths of all the
+buffers in dst
will be less than or equal to Buf::remaining()
.
The entries in dst
will be overwritten, but the data contained by
+the slices will not be modified. If chunk_vectored
does not fill every
+entry in dst
, then dst
is guaranteed to contain all remaining slices
+in `self.
This is a lower level function. Most operations are done with other +functions.
+This function should never panic. Once the end of the buffer is reached,
+i.e., Buf::remaining
returns 0, calls to chunk_vectored
must return 0
+without mutating dst
.
Implementations should also take care to properly handle being called
+with dst
being a zero length slice.
Returns true if there are any more bytes to consume
+This is equivalent to self.remaining() != 0
.
use bytes::Buf;
+
+let mut buf = &b"a"[..];
+
+assert!(buf.has_remaining());
+
+buf.get_u8();
+
+assert!(!buf.has_remaining());
copy_to_slice: Copies bytes from `self` into `dst`.

The cursor is advanced by the number of bytes copied. `self` must have enough remaining bytes to fill `dst`.

use bytes::Buf;

let mut buf = &b"hello world"[..];
let mut dst = [0; 5];

buf.copy_to_slice(&mut dst);
assert_eq!(&b"hello"[..], &dst);
assert_eq!(6, buf.remaining());

Panics: This function panics if `self.remaining() < dst.len()`.
get_u16_ne: Gets an unsigned 16 bit integer from `self` in native-endian byte order.

The current position is advanced by 2.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x08\x09 hello",
    false => b"\x09\x08 hello",
};
assert_eq!(0x0809, buf.get_u16_ne());

Panics: This function panics if there is not enough remaining data in `self`.

get_i16_ne: Gets a signed 16 bit integer from `self` in native-endian byte order.

The current position is advanced by 2.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x08\x09 hello",
    false => b"\x09\x08 hello",
};
assert_eq!(0x0809, buf.get_i16_ne());

Panics: This function panics if there is not enough remaining data in `self`.
get_u32: Gets an unsigned 32 bit integer from `self` in the big-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
assert_eq!(0x0809A0A1, buf.get_u32());

Panics: This function panics if there is not enough remaining data in `self`.

get_u32_le: Gets an unsigned 32 bit integer from `self` in the little-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
assert_eq!(0x0809A0A1, buf.get_u32_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_u32_ne: Gets an unsigned 32 bit integer from `self` in native-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x08\x09\xA0\xA1 hello",
    false => b"\xA1\xA0\x09\x08 hello",
};
assert_eq!(0x0809A0A1, buf.get_u32_ne());

Panics: This function panics if there is not enough remaining data in `self`.

get_i32_le: Gets a signed 32 bit integer from `self` in little-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
assert_eq!(0x0809A0A1, buf.get_i32_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_i32_ne: Gets a signed 32 bit integer from `self` in native-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x08\x09\xA0\xA1 hello",
    false => b"\xA1\xA0\x09\x08 hello",
};
assert_eq!(0x0809A0A1, buf.get_i32_ne());

Panics: This function panics if there is not enough remaining data in `self`.
get_u64: Gets an unsigned 64 bit integer from `self` in big-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
assert_eq!(0x0102030405060708, buf.get_u64());

Panics: This function panics if there is not enough remaining data in `self`.

get_u64_le: Gets an unsigned 64 bit integer from `self` in little-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
assert_eq!(0x0102030405060708, buf.get_u64_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_u64_ne: Gets an unsigned 64 bit integer from `self` in native-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
    false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
};
assert_eq!(0x0102030405060708, buf.get_u64_ne());

Panics: This function panics if there is not enough remaining data in `self`.

get_i64: Gets a signed 64 bit integer from `self` in big-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
assert_eq!(0x0102030405060708, buf.get_i64());

Panics: This function panics if there is not enough remaining data in `self`.

get_i64_le: Gets a signed 64 bit integer from `self` in little-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
assert_eq!(0x0102030405060708, buf.get_i64_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_i64_ne: Gets a signed 64 bit integer from `self` in native-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
    false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
};
assert_eq!(0x0102030405060708, buf.get_i64_ne());

Panics: This function panics if there is not enough remaining data in `self`.
get_u128: Gets an unsigned 128 bit integer from `self` in big-endian byte order.

The current position is advanced by 16.

use bytes::Buf;

let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
assert_eq!(0x01020304050607080910111213141516, buf.get_u128());

Panics: This function panics if there is not enough remaining data in `self`.

get_u128_le: Gets an unsigned 128 bit integer from `self` in little-endian byte order.

The current position is advanced by 16.

use bytes::Buf;

let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_u128_ne: Gets an unsigned 128 bit integer from `self` in native-endian byte order.

The current position is advanced by 16.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
    false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
};
assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne());

Panics: This function panics if there is not enough remaining data in `self`.

get_i128: Gets a signed 128 bit integer from `self` in big-endian byte order.

The current position is advanced by 16.

use bytes::Buf;

let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
assert_eq!(0x01020304050607080910111213141516, buf.get_i128());

Panics: This function panics if there is not enough remaining data in `self`.

get_i128_le: Gets a signed 128 bit integer from `self` in little-endian byte order.

The current position is advanced by 16.

use bytes::Buf;

let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_i128_ne: Gets a signed 128 bit integer from `self` in native-endian byte order.

The current position is advanced by 16.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
    false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
};
assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne());

Panics: This function panics if there is not enough remaining data in `self`.
get_uint: Gets an unsigned n-byte integer from `self` in big-endian byte order.

The current position is advanced by `nbytes`.

use bytes::Buf;

let mut buf = &b"\x01\x02\x03 hello"[..];
assert_eq!(0x010203, buf.get_uint(3));

Panics: This function panics if there is not enough remaining data in `self`.

get_uint_le: Gets an unsigned n-byte integer from `self` in little-endian byte order.

The current position is advanced by `nbytes`.

use bytes::Buf;

let mut buf = &b"\x03\x02\x01 hello"[..];
assert_eq!(0x010203, buf.get_uint_le(3));

Panics: This function panics if there is not enough remaining data in `self`.

get_uint_ne: Gets an unsigned n-byte integer from `self` in native-endian byte order.

The current position is advanced by `nbytes`.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x01\x02\x03 hello",
    false => b"\x03\x02\x01 hello",
};
assert_eq!(0x010203, buf.get_uint_ne(3));

Panics: This function panics if there is not enough remaining data in `self`, or if `nbytes` is greater than 8.

get_int: Gets a signed n-byte integer from `self` in big-endian byte order.

The current position is advanced by `nbytes`.

use bytes::Buf;

let mut buf = &b"\x01\x02\x03 hello"[..];
assert_eq!(0x010203, buf.get_int(3));

Panics: This function panics if there is not enough remaining data in `self`, or if `nbytes` is greater than 8.

get_int_le: Gets a signed n-byte integer from `self` in little-endian byte order.

The current position is advanced by `nbytes`.

use bytes::Buf;

let mut buf = &b"\x03\x02\x01 hello"[..];
assert_eq!(0x010203, buf.get_int_le(3));

Panics: This function panics if there is not enough remaining data in `self`, or if `nbytes` is greater than 8.

get_int_ne: Gets a signed n-byte integer from `self` in native-endian byte order.

The current position is advanced by `nbytes`.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x01\x02\x03 hello",
    false => b"\x03\x02\x01 hello",
};
assert_eq!(0x010203, buf.get_int_ne(3));

Panics: This function panics if there is not enough remaining data in `self`, or if `nbytes` is greater than 8.
get_f32: Gets an IEEE754 single-precision (4 bytes) floating point number from `self` in big-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf = &b"\x3F\x99\x99\x9A hello"[..];
assert_eq!(1.2f32, buf.get_f32());

Panics: This function panics if there is not enough remaining data in `self`.

get_f32_le: Gets an IEEE754 single-precision (4 bytes) floating point number from `self` in little-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf = &b"\x9A\x99\x99\x3F hello"[..];
assert_eq!(1.2f32, buf.get_f32_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_f32_ne: Gets an IEEE754 single-precision (4 bytes) floating point number from `self` in native-endian byte order.

The current position is advanced by 4.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x3F\x99\x99\x9A hello",
    false => b"\x9A\x99\x99\x3F hello",
};
assert_eq!(1.2f32, buf.get_f32_ne());

Panics: This function panics if there is not enough remaining data in `self`.

get_f64: Gets an IEEE754 double-precision (8 bytes) floating point number from `self` in big-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..];
assert_eq!(1.2f64, buf.get_f64());

Panics: This function panics if there is not enough remaining data in `self`.

get_f64_le: Gets an IEEE754 double-precision (8 bytes) floating point number from `self` in little-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..];
assert_eq!(1.2f64, buf.get_f64_le());

Panics: This function panics if there is not enough remaining data in `self`.

get_f64_ne: Gets an IEEE754 double-precision (8 bytes) floating point number from `self` in native-endian byte order.

The current position is advanced by 8.

use bytes::Buf;

let mut buf: &[u8] = match cfg!(target_endian = "big") {
    true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello",
    false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello",
};
assert_eq!(1.2f64, buf.get_f64_ne());

Panics: This function panics if there is not enough remaining data in `self`.
copy_to_bytes: Consumes `len` bytes inside `self` and returns a new instance of `Bytes` with this data.

This function may be optimized by the underlying type to avoid actual copies. For example, the `Bytes` implementation will do a shallow copy (ref-count increment).

use bytes::Buf;

let bytes = (&b"hello world"[..]).copy_to_bytes(5);
assert_eq!(&bytes[..], &b"hello"[..]);
take: Creates an adaptor which will read at most `limit` bytes from `self`.

This function returns a new instance of `Buf` which will read at most `limit` bytes.

use bytes::{Buf, BufMut};

let mut buf = (&b"hello world"[..]).take(5);
let mut dst = vec![];

dst.put(&mut buf);
assert_eq!(dst, b"hello");

let mut buf = buf.into_inner();
dst.clear();
dst.put(&mut buf);
assert_eq!(dst, b" world");
chain: Creates an adaptor which will chain this buffer with another.

The returned `Buf` instance will first consume all bytes from `self`. Afterwards the output is equivalent to the output of `next`.

use bytes::Buf;

let mut chain = (&b"hello "[..]).chain(&b"world"[..]);

let full = chain.copy_to_bytes(11);
assert_eq!(full.chunk(), b"hello world");
reader (`std` only): Creates an adaptor which implements the `Read` trait for `self`.

This function returns a new value which implements `Read` by adapting the `Read` trait functions to the `Buf` trait functions. Given that `Buf` operations are infallible, none of the `Read` functions will return with `Err`.

use bytes::{Bytes, Buf};
use std::io::Read;

let buf = Bytes::from("hello world");

let mut reader = buf.reader();
let mut dst = [0; 1024];

let num = reader.read(&mut dst).unwrap();

assert_eq!(11, num);
assert_eq!(&dst[..11], &b"hello world"[..]);
Trait bytes::buf::BufMut

pub unsafe trait BufMut {
    fn remaining_mut(&self) -> usize;
    unsafe fn advance_mut(&mut self, cnt: usize);
    fn chunk_mut(&mut self) -> &mut UninitSlice;

    fn has_remaining_mut(&self) -> bool { ... }
    fn put<T: Buf>(&mut self, src: T)
    where
        Self: Sized,
    { ... }
    fn put_slice(&mut self, src: &[u8]) { ... }
    fn put_bytes(&mut self, val: u8, cnt: usize) { ... }
    fn put_u8(&mut self, n: u8) { ... }
    fn put_i8(&mut self, n: i8) { ... }
    fn put_u16(&mut self, n: u16) { ... }
    fn put_u16_le(&mut self, n: u16) { ... }
    fn put_u16_ne(&mut self, n: u16) { ... }
    fn put_i16(&mut self, n: i16) { ... }
    fn put_i16_le(&mut self, n: i16) { ... }
    fn put_i16_ne(&mut self, n: i16) { ... }
    fn put_u32(&mut self, n: u32) { ... }
    fn put_u32_le(&mut self, n: u32) { ... }
    fn put_u32_ne(&mut self, n: u32) { ... }
    fn put_i32(&mut self, n: i32) { ... }
    fn put_i32_le(&mut self, n: i32) { ... }
    fn put_i32_ne(&mut self, n: i32) { ... }
    fn put_u64(&mut self, n: u64) { ... }
    fn put_u64_le(&mut self, n: u64) { ... }
    fn put_u64_ne(&mut self, n: u64) { ... }
    fn put_i64(&mut self, n: i64) { ... }
    fn put_i64_le(&mut self, n: i64) { ... }
    fn put_i64_ne(&mut self, n: i64) { ... }
    fn put_u128(&mut self, n: u128) { ... }
    fn put_u128_le(&mut self, n: u128) { ... }
    fn put_u128_ne(&mut self, n: u128) { ... }
    fn put_i128(&mut self, n: i128) { ... }
    fn put_i128_le(&mut self, n: i128) { ... }
    fn put_i128_ne(&mut self, n: i128) { ... }
    fn put_uint(&mut self, n: u64, nbytes: usize) { ... }
    fn put_uint_le(&mut self, n: u64, nbytes: usize) { ... }
    fn put_uint_ne(&mut self, n: u64, nbytes: usize) { ... }
    fn put_int(&mut self, n: i64, nbytes: usize) { ... }
    fn put_int_le(&mut self, n: i64, nbytes: usize) { ... }
    fn put_int_ne(&mut self, n: i64, nbytes: usize) { ... }
    fn put_f32(&mut self, n: f32) { ... }
    fn put_f32_le(&mut self, n: f32) { ... }
    fn put_f32_ne(&mut self, n: f32) { ... }
    fn put_f64(&mut self, n: f64) { ... }
    fn put_f64_le(&mut self, n: f64) { ... }
    fn put_f64_ne(&mut self, n: f64) { ... }
    fn limit(self, limit: usize) -> Limit<Self>
    where
        Self: Sized,
    { ... }
    fn writer(self) -> Writer<Self>
    where
        Self: Sized,
    { ... }
    fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
    where
        Self: Sized,
    { ... }
}
A trait for values that provide sequential write access to bytes.

Write bytes to a buffer.

A buffer stores bytes in memory such that write operations are infallible. The underlying storage may or may not be in contiguous memory. A `BufMut` value is a cursor into the buffer. Writing to `BufMut` advances the cursor position.

The simplest `BufMut` is a `Vec<u8>`.

use bytes::BufMut;

let mut buf = vec![];

buf.put(&b"hello world"[..]);

assert_eq!(buf, b"hello world");
remaining_mut: Returns the number of bytes that can be written from the current position until the end of the buffer is reached.

This value is greater than or equal to the length of the slice returned by `chunk_mut()`.

Writing to a `BufMut` may involve allocating more memory on the fly. Implementations may fail before reaching the number of bytes indicated by this method if they encounter an allocation failure.

use bytes::BufMut;

let mut dst = [0; 10];
let mut buf = &mut dst[..];

let original_remaining = buf.remaining_mut();
buf.put(&b"hello"[..]);

assert_eq!(original_remaining - 5, buf.remaining_mut());

Implementer notes: Implementations of `remaining_mut` should ensure that the return value does not change unless a call is made to `advance_mut` or any other function that is documented to change the `BufMut`’s current position.

Note that `remaining_mut` may return a value smaller than the actual available space.
advance_mut: Advance the internal cursor of the `BufMut`.

The next call to `chunk_mut` will return a slice starting `cnt` bytes further into the underlying buffer.

Safety: The caller must ensure that the next `cnt` bytes of `chunk` are initialized.

use bytes::BufMut;

let mut buf = Vec::with_capacity(16);

// Write some data
buf.chunk_mut()[0..2].copy_from_slice(b"he");
unsafe { buf.advance_mut(2) };

// write more bytes
buf.chunk_mut()[0..3].copy_from_slice(b"llo");

unsafe { buf.advance_mut(3); }

assert_eq!(5, buf.len());
assert_eq!(buf, b"hello");

Panics: This function may panic if `cnt > self.remaining_mut()`.

Implementer notes: It is recommended for implementations of `advance_mut` to panic if `cnt > self.remaining_mut()`. If the implementation does not panic, the call must behave as if `cnt == self.remaining_mut()`.

A call with `cnt == 0` should never panic and be a no-op.
chunk_mut: Returns a mutable slice starting at the current `BufMut` position and of length between 0 and `BufMut::remaining_mut()`. Note that this can be shorter than the whole remainder of the buffer (this allows non-continuous implementation).

This is a lower level function. Most operations are done with other functions.

The returned byte slice may represent uninitialized memory.

use bytes::BufMut;

let mut buf = Vec::with_capacity(16);

unsafe {
    // MaybeUninit::as_mut_ptr
    buf.chunk_mut()[0..].as_mut_ptr().write(b'h');
    buf.chunk_mut()[1..].as_mut_ptr().write(b'e');

    buf.advance_mut(2);

    buf.chunk_mut()[0..].as_mut_ptr().write(b'l');
    buf.chunk_mut()[1..].as_mut_ptr().write(b'l');
    buf.chunk_mut()[2..].as_mut_ptr().write(b'o');

    buf.advance_mut(3);
}

assert_eq!(5, buf.len());
assert_eq!(buf, b"hello");

Implementer notes: This function should never panic. `chunk_mut` should return an empty slice if and only if `remaining_mut()` returns 0. In other words, `chunk_mut()` returning an empty slice implies that `remaining_mut()` will return 0 and `remaining_mut()` returning 0 implies that `chunk_mut()` will return an empty slice.

This function may trigger an out-of-memory abort if it tries to allocate memory and fails to do so.
has_remaining_mut: Returns true if there is space in `self` for more bytes.

This is equivalent to `self.remaining_mut() != 0`.

use bytes::BufMut;

let mut dst = [0; 5];
let mut buf = &mut dst[..];

assert!(buf.has_remaining_mut());

buf.put(&b"hello"[..]);

assert!(!buf.has_remaining_mut());
put: Transfer bytes into `self` from `src` and advance the cursor by the number of bytes written.

use bytes::BufMut;

let mut buf = vec![];

buf.put_u8(b'h');
buf.put(&b"ello"[..]);
buf.put(&b" world"[..]);

assert_eq!(buf, b"hello world");

Panics: Panics if `self` does not have enough capacity to contain `src`.
put_slice: Transfer bytes into `self` from `src` and advance the cursor by the number of bytes written.

`self` must have enough remaining capacity to contain all of `src`.

use bytes::BufMut;

let mut dst = [0; 6];

{
    let mut buf = &mut dst[..];
    buf.put_slice(b"hello");

    assert_eq!(1, buf.remaining_mut());
}

assert_eq!(b"hello\0", &dst);
put_bytes: Put `cnt` bytes `val` into `self`.

Logically equivalent to calling `self.put_u8(val)` `cnt` times, but may work faster.

`self` must have at least `cnt` remaining capacity.

use bytes::BufMut;

let mut dst = [0; 6];

{
    let mut buf = &mut dst[..];
    buf.put_bytes(b'a', 4);

    assert_eq!(2, buf.remaining_mut());
}

assert_eq!(b"aaaa\0\0", &dst);

Panics: This function panics if there is not enough remaining capacity in `self`.
put_u16_le: Writes an unsigned 16 bit integer to `self` in little-endian byte order.

The current position is advanced by 2.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u16_le(0x0809);
assert_eq!(buf, b"\x09\x08");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_u16_ne: Writes an unsigned 16 bit integer to `self` in native-endian byte order.

The current position is advanced by 2.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u16_ne(0x0809);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x08\x09");
} else {
    assert_eq!(buf, b"\x09\x08");
}

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i16_le: Writes a signed 16 bit integer to `self` in little-endian byte order.

The current position is advanced by 2.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i16_le(0x0809);
assert_eq!(buf, b"\x09\x08");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i16_ne: Writes a signed 16 bit integer to `self` in native-endian byte order.

The current position is advanced by 2.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i16_ne(0x0809);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x08\x09");
} else {
    assert_eq!(buf, b"\x09\x08");
}

Panics: This function panics if there is not enough remaining capacity in `self`.
put_u32: Writes an unsigned 32 bit integer to `self` in big-endian byte order.

The current position is advanced by 4.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u32(0x0809A0A1);
assert_eq!(buf, b"\x08\x09\xA0\xA1");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_u32_le: Writes an unsigned 32 bit integer to `self` in little-endian byte order.

The current position is advanced by 4.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u32_le(0x0809A0A1);
assert_eq!(buf, b"\xA1\xA0\x09\x08");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_u32_ne: Writes an unsigned 32 bit integer to `self` in native-endian byte order.

The current position is advanced by 4.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u32_ne(0x0809A0A1);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x08\x09\xA0\xA1");
} else {
    assert_eq!(buf, b"\xA1\xA0\x09\x08");
}

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i32: Writes a signed 32 bit integer to `self` in big-endian byte order.

The current position is advanced by 4.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i32(0x0809A0A1);
assert_eq!(buf, b"\x08\x09\xA0\xA1");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i32_le: Writes a signed 32 bit integer to `self` in little-endian byte order.

The current position is advanced by 4.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i32_le(0x0809A0A1);
assert_eq!(buf, b"\xA1\xA0\x09\x08");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i32_ne: Writes a signed 32 bit integer to `self` in native-endian byte order.

The current position is advanced by 4.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i32_ne(0x0809A0A1);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x08\x09\xA0\xA1");
} else {
    assert_eq!(buf, b"\xA1\xA0\x09\x08");
}

Panics: This function panics if there is not enough remaining capacity in `self`.
put_u64: Writes an unsigned 64 bit integer to `self` in the big-endian byte order.

The current position is advanced by 8.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u64(0x0102030405060708);
assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_u64_le: Writes an unsigned 64 bit integer to `self` in little-endian byte order.

The current position is advanced by 8.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u64_le(0x0102030405060708);
assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_u64_ne: Writes an unsigned 64 bit integer to `self` in native-endian byte order.

The current position is advanced by 8.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u64_ne(0x0102030405060708);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
} else {
    assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
}

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i64: Writes a signed 64 bit integer to `self` in the big-endian byte order.

The current position is advanced by 8.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i64(0x0102030405060708);
assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i64_le: Writes a signed 64 bit integer to `self` in little-endian byte order.

The current position is advanced by 8.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i64_le(0x0102030405060708);
assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i64_ne: Writes a signed 64 bit integer to `self` in native-endian byte order.

The current position is advanced by 8.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i64_ne(0x0102030405060708);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
} else {
    assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
}

Panics: This function panics if there is not enough remaining capacity in `self`.
put_u128: Writes an unsigned 128 bit integer to `self` in the big-endian byte order.

The current position is advanced by 16.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u128(0x01020304050607080910111213141516);
assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_u128_le: Writes an unsigned 128 bit integer to `self` in little-endian byte order.

The current position is advanced by 16.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u128_le(0x01020304050607080910111213141516);
assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_u128_ne: Writes an unsigned 128 bit integer to `self` in native-endian byte order.

The current position is advanced by 16.

use bytes::BufMut;

let mut buf = vec![];
buf.put_u128_ne(0x01020304050607080910111213141516);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
} else {
    assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
}

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i128: Writes a signed 128 bit integer to `self` in the big-endian byte order.

The current position is advanced by 16.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i128(0x01020304050607080910111213141516);
assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i128_le: Writes a signed 128 bit integer to `self` in little-endian byte order.

The current position is advanced by 16.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i128_le(0x01020304050607080910111213141516);
assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");

Panics: This function panics if there is not enough remaining capacity in `self`.

put_i128_ne: Writes a signed 128 bit integer to `self` in native-endian byte order.

The current position is advanced by 16.

use bytes::BufMut;

let mut buf = vec![];
buf.put_i128_ne(0x01020304050607080910111213141516);
if cfg!(target_endian = "big") {
    assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
} else {
    assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
}

Panics: This function panics if there is not enough remaining capacity in `self`.
Writes an unsigned n-byte integer to self
in big-endian byte order.
The current position is advanced by nbytes
.
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint(0x010203, 3);
+assert_eq!(buf, b"\x01\x02\x03");
This function panics if there is not enough remaining capacity in
+self
or if nbytes
is greater than 8.
Writes an unsigned n-byte integer to self
in the little-endian byte order.
The current position is advanced by nbytes
.
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint_le(0x010203, 3);
+assert_eq!(buf, b"\x03\x02\x01");
This function panics if there is not enough remaining capacity in
+self
or if nbytes
is greater than 8.
Writes an unsigned n-byte integer to self
in the native-endian byte order.
The current position is advanced by nbytes
.
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint_ne(0x010203, 3);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03");
+} else {
+ assert_eq!(buf, b"\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in self, or if nbytes is greater than 8.
Writes the low nbytes of a signed integer to self in big-endian byte order.
The current position is advanced by nbytes.
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int(0x0504010203, 3);
+assert_eq!(buf, b"\x01\x02\x03");
This function panics if there is not enough remaining capacity in self, or if nbytes is greater than 8.
Writes the low nbytes of a signed integer to self in little-endian byte order.
The current position is advanced by nbytes.
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int_le(0x0504010203, 3);
+assert_eq!(buf, b"\x03\x02\x01");
This function panics if there is not enough remaining capacity in self, or if nbytes is greater than 8.
Writes the low nbytes of a signed integer to self in native-endian byte order.
The current position is advanced by nbytes.
use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int_ne(0x010203, 3);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03");
+} else {
+ assert_eq!(buf, b"\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in self, or if nbytes is greater than 8.
Writes an IEEE 754 single-precision (4 bytes) floating point number to self in big-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32(1.2f32);
+assert_eq!(buf, b"\x3F\x99\x99\x9A");
This function panics if there is not enough remaining capacity in self.
Writes an IEEE 754 single-precision (4 bytes) floating point number to self in little-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32_le(1.2f32);
+assert_eq!(buf, b"\x9A\x99\x99\x3F");
This function panics if there is not enough remaining capacity in self.
Writes an IEEE 754 single-precision (4 bytes) floating point number to self in native-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32_ne(1.2f32);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x3F\x99\x99\x9A");
+} else {
+ assert_eq!(buf, b"\x9A\x99\x99\x3F");
+}
This function panics if there is not enough remaining capacity in self.
Writes an IEEE 754 double-precision (8 bytes) floating point number to self in big-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64(1.2f64);
+assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
This function panics if there is not enough remaining capacity in self.
Writes an IEEE 754 double-precision (8 bytes) floating point number to self in little-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64_le(1.2f64);
+assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
This function panics if there is not enough remaining capacity in self.
Writes an IEEE 754 double-precision (8 bytes) floating point number to self in native-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64_ne(1.2f64);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+} else {
+ assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+}
This function panics if there is not enough remaining capacity in self.
Creates an adaptor which can write at most limit bytes to self.
use bytes::BufMut;
+
+let arr = &mut [0u8; 128][..];
+assert_eq!(arr.remaining_mut(), 128);
+
+let dst = arr.limit(10);
+assert_eq!(dst.remaining_mut(), 10);
Available on crate feature std only.
Creates an adaptor which implements the Write trait for self.
This function returns a new value which implements Write by adapting the Write trait functions to the BufMut trait functions. Given that BufMut operations are infallible, none of the Write functions will return with Err.
use bytes::BufMut;
+use std::io::Write;
+
+let mut buf = vec![].writer();
+
+let num = buf.write(&b"hello world"[..]).unwrap();
+assert_eq!(11, num);
+
+let buf = buf.into_inner();
+
+assert_eq!(*buf, b"hello world"[..]);
Creates an adapter which will chain this buffer with another.
The returned BufMut instance will first write to all bytes from self. Afterwards, it will write to next.
use bytes::BufMut;
+
+let mut a = [0u8; 5];
+let mut b = [0u8; 6];
+
+let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
+
+chain.put_slice(b"hello world");
+
+assert_eq!(&a[..], b"hello");
+assert_eq!(&b[..], b" world");
Provides abstractions for working with bytes.
The bytes crate provides an efficient byte buffer structure (Bytes) and traits for working with buffer implementations (Buf, BufMut).
Bytes
Bytes is an efficient container for storing and operating on contiguous slices of memory. It is intended for use primarily in networking code, but could have applications elsewhere as well.
Bytes values facilitate zero-copy network programming by allowing multiple Bytes objects to point to the same underlying memory. This is managed by using a reference count to track when the memory is no longer needed and can be freed.
A Bytes handle can be created directly from an existing byte store (such as &[u8] or Vec<u8>), but usually a BytesMut is used first and written to. For example:
use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(1024);
+buf.put(&b"hello world"[..]);
+buf.put_u16(1234);
+
+let a = buf.split();
+assert_eq!(a, b"hello world\x04\xD2"[..]);
+
+buf.put(&b"goodbye world"[..]);
+
+let b = buf.split();
+assert_eq!(b, b"goodbye world"[..]);
+
+assert_eq!(buf.capacity(), 998);
In the above example, only a single buffer of 1024 bytes is allocated. The handles a and b will share the underlying buffer and maintain indices tracking the view into the buffer represented by each handle.
See the struct docs for more details.
Buf, BufMut
These two traits provide read and write access to buffers. The underlying storage may or may not be in contiguous memory. For example, Bytes is a buffer that guarantees contiguous memory, but a rope stores the bytes in disjoint chunks. Buf and BufMut maintain cursors tracking the current position in the underlying byte storage. When bytes are read or written, the cursor is advanced.
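As a small sketch of this cursor behavior, using the Buf implementation for &[u8]:
use bytes::Buf;

let mut buf: &[u8] = b"abc";
assert_eq!(buf.remaining(), 3);

// Reading a byte advances the cursor.
assert_eq!(buf.get_u8(), b'a');
assert_eq!(buf.remaining(), 2);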
Read and Write
At first glance, it may seem that Buf and BufMut overlap in functionality with std::io::Read and std::io::Write. However, they serve different purposes. A buffer is the value that is provided as an argument to Read::read and Write::write. Read and Write may then perform a syscall, which has the potential of failing. Operations on Buf and BufMut are infallible.
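A short sketch contrasting the two (both Read and Buf are implemented for &[u8]):
use bytes::Buf;
use std::io::Read;

// Buf: infallible, no Result to handle.
let mut buf: &[u8] = b"hi";
assert_eq!(buf.get_u8(), b'h');

// Read: returns an io::Result that must be checked.
let mut reader: &[u8] = b"hi";
let mut dst = [0u8; 2];
let n = reader.read(&mut dst).unwrap();
assert_eq!(n, 2);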
pub struct Bytes { /* private fields */ }
A cheaply cloneable and sliceable chunk of contiguous memory.
Bytes is an efficient container for storing and operating on contiguous slices of memory. It is intended for use primarily in networking code, but could have applications elsewhere as well.
Bytes values facilitate zero-copy network programming by allowing multiple Bytes objects to point to the same underlying memory.
Bytes does not have a single implementation. It is an interface, whose exact behavior is implemented through dynamic dispatch in several underlying implementations of Bytes.
All Bytes implementations must fulfill the following requirements:
use bytes::Bytes;
+
+let mut mem = Bytes::from("Hello world");
+let a = mem.slice(0..5);
+
+assert_eq!(a, "Hello");
+
+let b = mem.split_to(6);
+
+assert_eq!(mem, "world");
+assert_eq!(b, "Hello ");
The Bytes struct itself is fairly small, limited to 4 usize fields used to track information about which segment of the underlying memory the Bytes handle has access to.
Bytes keeps both a pointer to the shared state containing the full memory slice and a pointer to the start of the region visible by the handle. Bytes also tracks the length of its view into the memory.
Bytes contains a vtable, which allows implementations of Bytes to define how sharing/cloning is implemented in detail. When Bytes::clone() is called, Bytes will call the vtable function for cloning the backing storage in order to share it behind multiple Bytes instances.
For Bytes implementations which refer to constant memory (e.g. created via Bytes::from_static()), the cloning implementation will be a no-op.
For Bytes implementations which point to a reference counted shared storage (e.g. an Arc<[u8]>), sharing will be implemented by increasing the reference count.
Due to this mechanism, multiple Bytes instances may point to the same shared memory region. Each Bytes instance can point to different sections within that memory region, and Bytes instances may or may not have overlapping views into the memory.
The following diagram visualizes a scenario where 2 Bytes instances make use of an Arc-based backing storage, and provide access to different views:
+ Arc ptrs ┌─────────┐
+ ________________________ / │ Bytes 2 │
+ / └─────────┘
+ / ┌───────────┐ | |
+|_________/ │ Bytes 1 │ | |
+| └───────────┘ | |
+| | | ___/ data | tail
+| data | tail |/ |
+v v v v
+┌─────┬─────┬───────────┬───────────────┬─────┐
+│ Arc │ │ │ │ │
+└─────┴─────┴───────────┴───────────────┴─────┘
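A small sketch of this sharing in practice:
use bytes::Bytes;

let full = Bytes::from_static(b"hello world");

// Cloning shares the backing storage; for static data it is a no-op.
let copy = full.clone();

// Slicing creates a second handle with a different view of the same memory.
let tail = full.slice(6..);

assert_eq!(&copy[..], b"hello world");
assert_eq!(&tail[..], b"world");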
Creates a new empty Bytes.
This will not allocate and the returned Bytes handle will be empty.
use bytes::Bytes;
+
+let b = Bytes::new();
+assert_eq!(&b[..], b"");
Creates a new Bytes from a static slice.
The returned Bytes will point directly to the static slice. There is no allocating or copying.
use bytes::Bytes;
+
+let b = Bytes::from_static(b"hello");
+assert_eq!(&b[..], b"hello");
Returns the number of bytes contained in this Bytes.
use bytes::Bytes;
+
+let b = Bytes::from(&b"hello"[..]);
+assert_eq!(b.len(), 5);
Returns true if the Bytes has a length of 0.
use bytes::Bytes;
+
+let b = Bytes::new();
+assert!(b.is_empty());
Returns true if this is the only reference to the data. Always returns false if the data is backed by a static slice.
The result of this method may be invalidated immediately if another thread clones this value while this is being called. Ensure you have unique access to this value (&mut Bytes) first if you need to be certain the result is valid (i.e. for safety reasons).
use bytes::Bytes;
+
+let a = Bytes::from(vec![1, 2, 3]);
+assert!(a.is_unique());
+let b = a.clone();
+assert!(!a.is_unique());
Creates a Bytes instance from a slice, by copying it.
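A minimal sketch, assuming this is Bytes::copy_from_slice:
use bytes::Bytes;

let b = Bytes::copy_from_slice(b"hello");
assert_eq!(&b[..], b"hello");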
Returns a slice of self for the provided range.
This will increment the reference count for the underlying memory and return a new Bytes handle set to the slice.
This operation is O(1).
use bytes::Bytes;
+
+let a = Bytes::from(&b"hello world"[..]);
+let b = a.slice(2..5);
+
+assert_eq!(&b[..], b"llo");
Requires that begin <= end and end <= self.len(), otherwise slicing will panic.
Returns a slice of self that is equivalent to the given subset.
When processing a Bytes buffer with other tools, one often gets a &[u8] which is in fact a slice of the Bytes, i.e. a subset of it. This function turns that &[u8] into another Bytes, as if one had called self.slice() with the offsets that correspond to subset.
This operation is O(1).
use bytes::Bytes;
+
+let bytes = Bytes::from(&b"012345678"[..]);
+let as_slice = bytes.as_ref();
+let subset = &as_slice[2..6];
+let subslice = bytes.slice_ref(&subset);
+assert_eq!(&subslice[..], b"2345");
Requires that the given sub slice is in fact contained within the Bytes buffer; otherwise this function will panic.
Splits the bytes into two at the given index.
Afterwards self contains elements [0, at), and the returned Bytes contains elements [at, len).
This is an O(1) operation that just increases the reference count and sets a few indices.
use bytes::Bytes;
+
+let mut a = Bytes::from(&b"hello world"[..]);
+let b = a.split_off(5);
+
+assert_eq!(&a[..], b"hello");
+assert_eq!(&b[..], b" world");
Panics if at > len.
Splits the bytes into two at the given index.
Afterwards self contains elements [at, len), and the returned Bytes contains elements [0, at).
This is an O(1) operation that just increases the reference count and sets a few indices.
use bytes::Bytes;
+
+let mut a = Bytes::from(&b"hello world"[..]);
+let b = a.split_to(5);
+
+assert_eq!(&a[..], b" world");
+assert_eq!(&b[..], b"hello");
Panics if at > len.
Shortens the buffer, keeping the first len bytes and dropping the rest.
If len is greater than the buffer’s current length, this has no effect.
The split_off method can emulate truncate, but this causes the excess bytes to be returned instead of dropped.
use bytes::Bytes;
+
+let mut buf = Bytes::from(&b"hello world"[..]);
+buf.truncate(5);
+assert_eq!(buf, b"hello"[..]);
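As a sketch of the alternative mentioned above, emulating truncate with split_off so the excess bytes are kept:
use bytes::Bytes;

let mut buf = Bytes::from(&b"hello world"[..]);
let excess = buf.split_off(5); // like truncate(5), but the tail is returned
assert_eq!(buf, b"hello"[..]);
assert_eq!(excess, b" world"[..]);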
Checks if all bytes in this slice are within the ASCII range.
Checks that two slices are an ASCII case-insensitive match.
Same as to_ascii_lowercase(a) == to_ascii_lowercase(b), but without allocating and copying temporaries.
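A small sketch of both checks on byte slices (is_ascii and eq_ignore_ascii_case):
let v = b"Ferris";
assert!(v.is_ascii());
assert!(v.eq_ignore_ascii_case(b"FERRIS"));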
Returns an iterator that produces an escaped version of this slice, +treating it as an ASCII string.
+
+let s = b"0\t\r\n'\"\\\x9d";
+let escaped = s.escape_ascii().to_string();
+assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
This is a nightly-only experimental API (byte_slice_trim_ascii).
Returns a byte slice with leading ASCII whitespace bytes removed.
‘Whitespace’ refers to the definition used by u8::is_ascii_whitespace.
#![feature(byte_slice_trim_ascii)]
+
+assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
+assert_eq!(b" ".trim_ascii_start(), b"");
+assert_eq!(b"".trim_ascii_start(), b"");
This is a nightly-only experimental API (byte_slice_trim_ascii).
Returns a byte slice with trailing ASCII whitespace bytes removed.
‘Whitespace’ refers to the definition used by u8::is_ascii_whitespace.
#![feature(byte_slice_trim_ascii)]
+
+assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
+assert_eq!(b" ".trim_ascii_end(), b"");
+assert_eq!(b"".trim_ascii_end(), b"");
This is a nightly-only experimental API (byte_slice_trim_ascii).
Returns a byte slice with leading and trailing ASCII whitespace bytes removed.
‘Whitespace’ refers to the definition used by u8::is_ascii_whitespace.
#![feature(byte_slice_trim_ascii)]
+
+assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
+assert_eq!(b" ".trim_ascii(), b"");
+assert_eq!(b"".trim_ascii(), b"");
Returns the number of elements in the slice.
+let a = [1, 2, 3];
+assert_eq!(a.len(), 3);
Returns true if the slice has a length of 0.
let a = [1, 2, 3];
+assert!(!a.is_empty());
Returns the first element of the slice, or None if it is empty.
let v = [10, 40, 30];
+assert_eq!(Some(&10), v.first());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.first());
Returns the first and all the rest of the elements of the slice, or None if it is empty.
let x = &[0, 1, 2];
+
+if let Some((first, elements)) = x.split_first() {
+ assert_eq!(first, &0);
+ assert_eq!(elements, &[1, 2]);
+}
Returns the last and all the rest of the elements of the slice, or None if it is empty.
let x = &[0, 1, 2];
+
+if let Some((last, elements)) = x.split_last() {
+ assert_eq!(last, &2);
+ assert_eq!(elements, &[0, 1]);
+}
Returns the last element of the slice, or None if it is empty.
let v = [10, 40, 30];
+assert_eq!(Some(&30), v.last());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.last());
Returns a reference to an element or subslice depending on the type of index.
If given a position, returns a reference to the element at that position, or None if out of bounds.
If given a range, returns the subslice corresponding to that range, or None if out of bounds.
+assert_eq!(Some(&40), v.get(1));
+assert_eq!(Some(&[10, 40][..]), v.get(0..2));
+assert_eq!(None, v.get(3));
+assert_eq!(None, v.get(0..4));
Returns a reference to an element or subslice, without doing bounds checking.
For a safe alternative see get.
Calling this method with an out-of-bounds index is undefined behavior even if the resulting reference is not used.
+let x = &[1, 2, 4];
+
+unsafe {
+ assert_eq!(x.get_unchecked(1), &2);
+}
Returns a raw pointer to the slice’s buffer.
+The caller must ensure that the slice outlives the pointer this +function returns, or else it will end up pointing to garbage.
+The caller must also ensure that the memory the pointer (non-transitively) points to
+is never written to (except inside an UnsafeCell
) using this pointer or any pointer
+derived from it. If you need to mutate the contents of the slice, use as_mut_ptr
.
Modifying the container referenced by this slice may cause its buffer +to be reallocated, which would also make any pointers to it invalid.
+let x = &[1, 2, 4];
+let x_ptr = x.as_ptr();
+
+unsafe {
+ for i in 0..x.len() {
+ assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
+ }
+}
Returns the two raw pointers spanning the slice.
+The returned range is half-open, which means that the end pointer +points one past the last element of the slice. This way, an empty +slice is represented by two equal pointers, and the difference between +the two pointers represents the size of the slice.
+See as_ptr
for warnings on using these pointers. The end pointer
+requires extra caution, as it does not point to a valid element in the
+slice.
This function is useful for interacting with foreign interfaces which +use two pointers to refer to a range of elements in memory, as is +common in C++.
+It can also be useful to check if a pointer to an element refers to an +element of this slice:
+ +let a = [1, 2, 3];
+let x = &a[1] as *const _;
+let y = &5 as *const _;
+
+assert!(a.as_ptr_range().contains(&x));
+assert!(!a.as_ptr_range().contains(&y));
Returns an iterator over the slice.
+The iterator yields all items from start to end.
+let x = &[1, 2, 4];
+let mut iterator = x.iter();
+
+assert_eq!(iterator.next(), Some(&1));
+assert_eq!(iterator.next(), Some(&2));
+assert_eq!(iterator.next(), Some(&4));
+assert_eq!(iterator.next(), None);
Returns an iterator over all contiguous windows of length size. The windows overlap. If the slice is shorter than size, the iterator returns no values.
Panics if size is 0.
+let mut iter = slice.windows(2);
+assert_eq!(iter.next().unwrap(), &['r', 'u']);
+assert_eq!(iter.next().unwrap(), &['u', 's']);
+assert_eq!(iter.next().unwrap(), &['s', 't']);
+assert!(iter.next().is_none());
If the slice is shorter than size:
let slice = ['f', 'o', 'o'];
+let mut iter = slice.windows(4);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size
elements of the slice at a time, starting at the
+beginning of the slice.
The chunks are slices and do not overlap. If chunk_size
does not divide the length of the
+slice, then the last chunk will not have length chunk_size
.
See chunks_exact
for a variant of this iterator that returns chunks of always exactly
+chunk_size
elements, and rchunks
for the same iterator but starting at the end of the
+slice.
Panics if chunk_size
is 0.
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.chunks(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert_eq!(iter.next().unwrap(), &['m']);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size elements of the slice at a time, starting at the beginning of the slice.
The chunks are slices and do not overlap. If chunk_size does not divide the length of the slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved from the remainder function of the iterator.
Due to each chunk having exactly chunk_size elements, the compiler can often optimize the resulting code better than in the case of chunks.
See chunks for a variant of this iterator that also returns the remainder as a smaller chunk, and rchunks_exact for the same iterator but starting at the end of the slice.
Panics if chunk_size is 0.
+let mut iter = slice.chunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, assuming that there’s no remainder.
This may only be called when:
The slice splits exactly into N-element chunks (aka self.len() % N == 0).
N != 0
+let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
+let chunks: &[[char; 1]] =
+ // SAFETY: 1-element chunks never have remainder
+ unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+let chunks: &[[char; 3]] =
+ // SAFETY: The slice length (6) is a multiple of 3
+ unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
+
+// These would be unsound:
+// let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
+// let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, starting at the beginning of the slice, and a remainder slice with length strictly less than N.
Panics if N is 0. This check will most probably get changed to a compile-time error before this method gets stabilized.
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (chunks, remainder) = slice.as_chunks();
+assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
+assert_eq!(remainder, &['m']);
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, starting at the end of the slice, and a remainder slice with length strictly less than N.
Panics if N is 0. This check will most probably get changed to a compile-time error before this method gets stabilized.
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (remainder, chunks) = slice.as_rchunks();
+assert_eq!(remainder, &['l']);
+assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
This is a nightly-only experimental API (array_chunks).
Returns an iterator over N elements of the slice at a time, starting at the beginning of the slice.
The chunks are array references and do not overlap. If N does not divide the length of the slice, then the last up to N-1 elements will be omitted and can be retrieved from the remainder function of the iterator.
This method is the const generic equivalent of chunks_exact.
Panics if N is 0. This check will most probably get changed to a compile-time error before this method gets stabilized.
#![feature(array_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.array_chunks();
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
This is a nightly-only experimental API (array_windows).
Returns an iterator over overlapping windows of N elements of a slice, starting at the beginning of the slice.
This is the const generic equivalent of windows.
If N is greater than the size of the slice, it will return no windows.
Panics if N is 0. This check will most probably get changed to a compile-time error before this method gets stabilized.
#![feature(array_windows)]
+let slice = [0, 1, 2, 3];
+let mut iter = slice.array_windows();
+assert_eq!(iter.next().unwrap(), &[0, 1]);
+assert_eq!(iter.next().unwrap(), &[1, 2]);
+assert_eq!(iter.next().unwrap(), &[2, 3]);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size elements of the slice at a time, starting at the end of the slice.
The chunks are slices and do not overlap. If chunk_size does not divide the length of the slice, then the last chunk will not have length chunk_size.
See rchunks_exact for a variant of this iterator that returns chunks of always exactly chunk_size elements, and chunks for the same iterator but starting at the beginning of the slice.
Panics if chunk_size is 0.
+let mut iter = slice.rchunks(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert_eq!(iter.next().unwrap(), &['l']);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size elements of the slice at a time, starting at the end of the slice.
The chunks are slices and do not overlap. If chunk_size does not divide the length of the slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved from the remainder function of the iterator.
Due to each chunk having exactly chunk_size elements, the compiler can often optimize the resulting code better than in the case of rchunks.
See rchunks for a variant of this iterator that also returns the remainder as a smaller chunk, and chunks_exact for the same iterator but starting at the beginning of the slice.
Panics if chunk_size is 0.
+let mut iter = slice.rchunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['l']);
This is a nightly-only experimental API (slice_group_by).
Returns an iterator over the slice producing non-overlapping runs of elements using the predicate to separate them.
The predicate is called on two consecutive elements, meaning it is called on slice[0] and slice[1], then on slice[1] and slice[2], and so on.
#![feature(slice_group_by)]
+
+let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
+
+let mut iter = slice.group_by(|a, b| a == b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
+assert_eq!(iter.next(), Some(&[3, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
+assert_eq!(iter.next(), None);
This method can be used to extract the sorted subslices:
+ +#![feature(slice_group_by)]
+
+let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
+
+let mut iter = slice.group_by(|a, b| a <= b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
+assert_eq!(iter.next(), None);
Divides one slice into two at an index.
+The first will contain all indices from [0, mid)
(excluding
+the index mid
itself) and the second will contain all
+indices from [mid, len)
(excluding the index len
itself).
Panics if mid > len
.
let v = [1, 2, 3, 4, 5, 6];
+
+{
+ let (left, right) = v.split_at(0);
+ assert_eq!(left, []);
+ assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_at(2);
+ assert_eq!(left, [1, 2]);
+ assert_eq!(right, [3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_at(6);
+ assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, []);
+}
This is a nightly-only experimental API (slice_split_at_unchecked).
Divides one slice into two at an index, without doing bounds checking.
The first will contain all indices from [0, mid) (excluding the index mid itself) and the second will contain all indices from [mid, len) (excluding the index len itself).
For a safe alternative see split_at.
Calling this method with an out-of-bounds index is undefined behavior even if the resulting reference is not used. The caller has to ensure that 0 <= mid <= self.len().
#![feature(slice_split_at_unchecked)]
+
+let v = [1, 2, 3, 4, 5, 6];
+
+unsafe {
+ let (left, right) = v.split_at_unchecked(0);
+ assert_eq!(left, []);
+ assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+unsafe {
+ let (left, right) = v.split_at_unchecked(2);
+ assert_eq!(left, [1, 2]);
+ assert_eq!(right, [3, 4, 5, 6]);
+}
+
+unsafe {
+ let (left, right) = v.split_at_unchecked(6);
+ assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, []);
+}
This is a nightly-only experimental API (split_array).
Divides one slice into an array and a remainder slice at an index.
The array will contain all indices from [0, N) (excluding the index N itself) and the slice will contain all indices from [N, len) (excluding the index len itself).
Panics if N > len.
#![feature(split_array)]
+
+let v = &[1, 2, 3, 4, 5, 6][..];
+
+{
+ let (left, right) = v.split_array_ref::<0>();
+ assert_eq!(left, &[]);
+ assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_array_ref::<2>();
+ assert_eq!(left, &[1, 2]);
+ assert_eq!(right, [3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_array_ref::<6>();
+ assert_eq!(left, &[1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, []);
+}
This is a nightly-only experimental API (split_array).
Divides one slice into an array and a remainder slice at an index from the end.
The slice will contain all indices from [0, len - N) (excluding the index len - N itself) and the array will contain all indices from [len - N, len) (excluding the index len itself).
Panics if N > len.
#![feature(split_array)]
+
+let v = &[1, 2, 3, 4, 5, 6][..];
+
+{
+ let (left, right) = v.rsplit_array_ref::<0>();
+ assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, &[]);
+}
+
+{
+ let (left, right) = v.rsplit_array_ref::<2>();
+ assert_eq!(left, [1, 2, 3, 4]);
+ assert_eq!(right, &[5, 6]);
+}
+
+{
+ let (left, right) = v.rsplit_array_ref::<6>();
+ assert_eq!(left, []);
+ assert_eq!(right, &[1, 2, 3, 4, 5, 6]);
+}
Returns an iterator over subslices separated by elements that match pred. The matched element is not contained in the subslices.
let slice = [10, 40, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
If the first element is matched, an empty slice will be the first item +returned by the iterator. Similarly, if the last element in the slice +is matched, an empty slice will be the last item returned by the +iterator:
+ +let slice = [10, 40, 33];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert!(iter.next().is_none());
If two matched elements are directly adjacent, an empty slice will be +present between them:
+ +let slice = [10, 6, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
Returns an iterator over subslices separated by elements that match pred. The matched element is contained in the end of the previous subslice as a terminator.
let slice = [10, 40, 33, 20];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
If the last element of the slice is matched, +that element will be considered the terminator of the preceding slice. +That slice will be the last item returned by the iterator.
+ +let slice = [3, 10, 40, 33];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[3]);
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert!(iter.next().is_none());
Returns an iterator over subslices separated by elements that match pred, starting at the end of the slice and working backwards. The matched element is not contained in the subslices.
let slice = [11, 22, 33, 0, 44, 55];
+let mut iter = slice.rsplit(|num| *num == 0);
+
+assert_eq!(iter.next().unwrap(), &[44, 55]);
+assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
+assert_eq!(iter.next(), None);
As with split(), if the first or last element is matched, an empty slice will be the first (or last) item returned by the iterator.
let v = &[0, 1, 1, 2, 3, 5, 8];
+let mut it = v.rsplit(|n| *n % 2 == 0);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next().unwrap(), &[3, 5]);
+assert_eq!(it.next().unwrap(), &[1, 1]);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next(), None);
Returns an iterator over subslices separated by elements that match pred, limited to returning at most n items. The matched element is not contained in the subslices.
The last element returned, if any, will contain the remainder of the slice.
Print the slice split once by numbers divisible by 3 (i.e., [10, 40], [20, 60, 50]):
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.splitn(2, |num| *num % 3 == 0) {
+ println!("{group:?}");
+}
Returns an iterator over subslices separated by elements that match pred, limited to returning at most n items. This starts at the end of the slice and works backwards. The matched element is not contained in the subslices.
The last element returned, if any, will contain the remainder of the slice.
Print the slice split once, starting from the end, by numbers divisible by 3 (i.e., [50], [10, 40, 30, 20]):
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.rsplitn(2, |num| *num % 3 == 0) {
+ println!("{group:?}");
+}
Returns true if the slice contains an element with the given value.
This operation is O(n). Note that if you have a sorted slice, binary_search may be faster.
let v = [10, 40, 30];
+assert!(v.contains(&30));
+assert!(!v.contains(&50));
If you do not have a &T, but some other value that you can compare with one (for example, String implements PartialEq<str>), you can use iter().any:
let v = [String::from("hello"), String::from("world")]; // slice of `String`
+assert!(v.iter().any(|e| e == "hello")); // search with `&str`
+assert!(!v.iter().any(|e| e == "hi"));
Returns true if needle is a prefix of the slice.
let v = [10, 40, 30];
+assert!(v.starts_with(&[10]));
+assert!(v.starts_with(&[10, 40]));
+assert!(!v.starts_with(&[50]));
+assert!(!v.starts_with(&[10, 50]));
Always returns true if needle is an empty slice:
let v = &[10, 40, 30];
+assert!(v.starts_with(&[]));
+let v: &[u8] = &[];
+assert!(v.starts_with(&[]));
Returns true if needle is a suffix of the slice.
let v = [10, 40, 30];
+assert!(v.ends_with(&[30]));
+assert!(v.ends_with(&[40, 30]));
+assert!(!v.ends_with(&[50]));
+assert!(!v.ends_with(&[50, 30]));
Always returns true if needle is an empty slice:
let v = &[10, 40, 30];
+assert!(v.ends_with(&[]));
+let v: &[u8] = &[];
+assert!(v.ends_with(&[]));
Returns a subslice with the prefix removed.
If the slice starts with prefix, returns the subslice after the prefix, wrapped in Some. If prefix is empty, simply returns the original slice.
If the slice does not start with prefix, returns None.
let v = &[10, 40, 30];
+assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
+assert_eq!(v.strip_prefix(&[50]), None);
+assert_eq!(v.strip_prefix(&[10, 50]), None);
+
+let prefix : &str = "he";
+assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
+ Some(b"llo".as_ref()));
Returns a subslice with the suffix removed.
If the slice ends with suffix, returns the subslice before the suffix, wrapped in Some. If suffix is empty, simply returns the original slice.
If the slice does not end with suffix, returns None.
let v = &[10, 40, 30];
+assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
+assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
+assert_eq!(v.strip_suffix(&[50]), None);
+assert_eq!(v.strip_suffix(&[50, 30]), None);
Binary searches this slice for a given element.
+This behaves similarly to contains
if this slice is sorted.
If the value is found then Result::Ok
is returned, containing the
+index of the matching element. If there are multiple matches, then any
+one of the matches could be returned. The index is chosen
+deterministically, but is subject to change in future versions of Rust.
+If the value is not found then Result::Err
is returned, containing
+the index where a matching element could be inserted while maintaining
+sorted order.
See also binary_search_by
, binary_search_by_key
, and partition_point
.
Looks up a series of four elements. The first is found, with a
+uniquely determined position; the second and third are not
+found; the fourth could match any position in [1, 4]
.
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+assert_eq!(s.binary_search(&13), Ok(9));
+assert_eq!(s.binary_search(&4), Err(7));
+assert_eq!(s.binary_search(&100), Err(13));
+let r = s.binary_search(&1);
+assert!(match r { Ok(1..=4) => true, _ => false, });
If you want to find that whole range of matching items, rather than an arbitrary matching one, that can be done using partition_point:
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let low = s.partition_point(|x| x < &1);
+assert_eq!(low, 1);
+let high = s.partition_point(|x| x <= &1);
+assert_eq!(high, 5);
+let r = s.binary_search(&1);
+assert!((low..high).contains(&r.unwrap()));
+
+assert!(s[..low].iter().all(|&x| x < 1));
+assert!(s[low..high].iter().all(|&x| x == 1));
+assert!(s[high..].iter().all(|&x| x > 1));
+
+// For something not found, the "range" of equal items is empty
+assert_eq!(s.partition_point(|x| x < &11), 9);
+assert_eq!(s.partition_point(|x| x <= &11), 9);
+assert_eq!(s.binary_search(&11), Err(9));
If you want to insert an item to a sorted vector, while maintaining
+sort order, consider using partition_point
:
let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x < num);
+// The above is equivalent to `let idx = s.binary_search(&num).unwrap_or_else(|x| x);`
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
Binary searches this slice with a comparator function.
+This behaves similarly to contains
if this slice is sorted.
The comparator function should implement an order consistent
+with the sort order of the underlying slice, returning an
+order code that indicates whether its argument is Less
,
+Equal
or Greater
the desired target.
If the value is found then Result::Ok
is returned, containing the
+index of the matching element. If there are multiple matches, then any
+one of the matches could be returned. The index is chosen
+deterministically, but is subject to change in future versions of Rust.
+If the value is not found then Result::Err
is returned, containing
+the index where a matching element could be inserted while maintaining
+sorted order.
See also binary_search
, binary_search_by_key
, and partition_point
.
Looks up a series of four elements. The first is found, with a
+uniquely determined position; the second and third are not
+found; the fourth could match any position in [1, 4]
.
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let seek = 13;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
+let seek = 4;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
+let seek = 100;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
+let seek = 1;
+let r = s.binary_search_by(|probe| probe.cmp(&seek));
+assert!(match r { Ok(1..=4) => true, _ => false, });
Binary searches this slice with a key extraction function.
+This behaves similarly to contains
if this slice is sorted.
Assumes that the slice is sorted by the key, for instance with
+sort_by_key
using the same key extraction function.
If the value is found then Result::Ok
is returned, containing the
+index of the matching element. If there are multiple matches, then any
+one of the matches could be returned. The index is chosen
+deterministically, but is subject to change in future versions of Rust.
+If the value is not found then Result::Err
is returned, containing
+the index where a matching element could be inserted while maintaining
+sorted order.
See also binary_search
, binary_search_by
, and partition_point
.
Looks up a series of four elements in a slice of pairs sorted by
+their second elements. The first is found, with a uniquely
+determined position; the second and third are not found; the
+fourth could match any position in [1, 4]
.
let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
+ (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+ (1, 21), (2, 34), (4, 55)];
+
+assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
+assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7));
+assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
+let r = s.binary_search_by_key(&1, |&(a, b)| b);
+assert!(match r { Ok(1..=4) => true, _ => false, });
Transmute the slice to a slice of another type, ensuring alignment of the types is maintained.
This method splits the slice into three distinct slices: prefix, correctly aligned middle slice of a new type, and the suffix slice. The method may make the middle slice the greatest length possible for a given type and input slice, but only your algorithm’s performance should depend on that, not its correctness. It is permissible for all of the input data to be returned as the prefix or suffix slice.
This method has no purpose when either input element T or output element U are zero-sized and will return the original slice without splitting anything.
This method is essentially a transmute with respect to the elements in the returned middle slice, so all the usual caveats pertaining to transmute::<T, U> also apply here.
+ +unsafe {
+ let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ let (prefix, shorts, suffix) = bytes.align_to::<u16>();
+ // less_efficient_algorithm_for_bytes(prefix);
+ // more_efficient_algorithm_for_aligned_shorts(shorts);
+ // less_efficient_algorithm_for_bytes(suffix);
+}
This is a nightly-only experimental API (portable_simd).
Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
This is a safe wrapper around slice::align_to, so it has the same weak postconditions as that method. You’re only assured that self.len() == prefix.len() + middle.len() * LANES + suffix.len().
Notably, all of the following are possible:
+prefix.len() >= LANES
.middle.is_empty()
despite self.len() >= 3 * LANES
.suffix.len() >= LANES
.That said, this is a safe method, so if you’re only writing safe code, +then this can at most cause incorrect logic, not unsoundness.
+This will panic if the size of the SIMD type is different from
+LANES
times that of the scalar.
At the time of writing, the trait restrictions on Simd<T, LANES>
keeps
+that from ever happening, as only power-of-two numbers of lanes are
+supported. It’s possible that, in the future, those restrictions might
+be lifted in a way that would make it possible to see panics from this
+method for something like LANES == 3
.
#![feature(portable_simd)]
+use core::simd::SimdFloat;
+
+let short = &[1, 2, 3];
+let (prefix, middle, suffix) = short.as_simd::<4>();
+assert_eq!(middle, []); // Not enough elements for anything in the middle
+
+// They might be split in any possible way between prefix and suffix
+let it = prefix.iter().chain(suffix).copied();
+assert_eq!(it.collect::<Vec<_>>(), vec![1, 2, 3]);
+
+fn basic_simd_sum(x: &[f32]) -> f32 {
+ use std::ops::Add;
+ use std::simd::f32x4;
+ let (prefix, middle, suffix) = x.as_simd();
+ let sums = f32x4::from_array([
+ prefix.iter().copied().sum(),
+ 0.0,
+ 0.0,
+ suffix.iter().copied().sum(),
+ ]);
+ let sums = middle.iter().copied().fold(sums, f32x4::add);
+ sums.reduce_sum()
+}
+
+let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();
+assert_eq!(basic_simd_sum(&numbers[1..99]), 4949.0);
This is a nightly-only experimental API (is_sorted).
Checks if the elements of this slice are sorted.
That is, for each element a and its following element b, a <= b must hold. If the slice yields exactly zero or one element, true is returned.
Note that if Self::Item is only PartialOrd, but not Ord, the above definition implies that this function returns false if any two consecutive items are not comparable.
+let empty: [i32; 0] = [];
+
+assert!([1, 2, 2, 9].is_sorted());
+assert!(![1, 3, 2, 4].is_sorted());
+assert!([0].is_sorted());
+assert!(empty.is_sorted());
+assert!(![0.0, 1.0, f32::NAN].is_sorted());
This is a nightly-only experimental API (is_sorted).
Checks if the elements of this slice are sorted using the given comparator function.
Instead of using PartialOrd::partial_cmp, this function uses the given compare function to determine the ordering of two elements. Apart from that, it’s equivalent to is_sorted; see its documentation for more information.
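A sketch for is_sorted_by, assuming the nightly signature in which the comparator returns Option<Ordering>:
#![feature(is_sorted)]

// The comparator mirrors PartialOrd::partial_cmp here.
assert!([1, 2, 2, 9].is_sorted_by(|a, b| a.partial_cmp(b)));
assert!(![1, 3, 2].is_sorted_by(|a, b| a.partial_cmp(b)));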
This is a nightly-only experimental API (is_sorted).
Checks if the elements of this slice are sorted using the given key extraction function.
Instead of comparing the slice’s elements directly, this function compares the keys of the elements, as determined by f. Apart from that, it’s equivalent to is_sorted; see its documentation for more information.
#![feature(is_sorted)]
+
+assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
+assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
Returns the index of the partition point according to the given predicate (the index of the first element of the second partition).
The slice is assumed to be partitioned according to the given predicate. This means that all elements for which the predicate returns true are at the start of the slice and all elements for which the predicate returns false are at the end. For example, [7, 15, 3, 5, 4, 12, 6] is partitioned under the predicate x % 2 != 0 (all odd numbers are at the start, all even at the end).
If this slice is not partitioned, the returned result is unspecified and meaningless, as this method performs a kind of binary search.
See also binary_search, binary_search_by, and binary_search_by_key.
let v = [1, 2, 3, 3, 5, 6, 7];
+let i = v.partition_point(|&x| x < 5);
+
+assert_eq!(i, 4);
+assert!(v[..i].iter().all(|&x| x < 5));
+assert!(v[i..].iter().all(|&x| !(x < 5)));
If all elements of the slice match the predicate, including if the slice is empty, then the length of the slice will be returned:
+ +let a = [2, 4, 8];
+assert_eq!(a.partition_point(|x| x < &100), a.len());
+let a: [i32; 0] = [];
+assert_eq!(a.partition_point(|x| x < &100), 0);
If you want to insert an item into a sorted vector, while maintaining sort order:
+ +let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x < num);
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
slice_flatten
)Takes a &[[T; N]]
, and flattens it to a &[T]
.
This panics if the length of the resulting slice would overflow a usize
.
This is only possible when flattening a slice of arrays of zero-sized
+types, and thus tends to be irrelevant in practice. If
+size_of::<T>() > 0
, this will never panic.
#![feature(slice_flatten)]
+
+assert_eq!([[1, 2, 3], [4, 5, 6]].flatten(), &[1, 2, 3, 4, 5, 6]);
+
+assert_eq!(
+ [[1, 2, 3], [4, 5, 6]].flatten(),
+ [[1, 2], [3, 4], [5, 6]].flatten(),
+);
+
+let slice_of_empty_arrays: &[[i32; 0]] = &[[], [], [], [], []];
+assert!(slice_of_empty_arrays.flatten().is_empty());
+
+let empty_slice_of_arrays: &[[u32; 10]] = &[];
+assert!(empty_slice_of_arrays.flatten().is_empty());
Copies self
into a new Vec
.
let s = [10, 40, 30];
+let x = s.to_vec();
+// Here, `s` and `x` can be modified independently.
This is a nightly-only experimental API (allocator_api).
Copies self into a new Vec with an allocator.
#![feature(allocator_api)]
+
+use std::alloc::System;
+
+let s = [10, 40, 30];
+let x = s.to_vec_in(System);
+// Here, `s` and `x` can be modified independently.
Flattens a slice of T
into a single value Self::Output
.
assert_eq!(["hello", "world"].concat(), "helloworld");
+assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
Flattens a slice of T
into a single value Self::Output
, placing a
+given separator between each.
assert_eq!(["hello", "world"].join(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
Flattens a slice of T into a single value Self::Output, placing a given separator between each. This method is deprecated; it has been renamed to join.
assert_eq!(["hello", "world"].connect(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
Returns a vector containing a copy of this slice where each byte is mapped to its ASCII upper case equivalent.
ASCII letters ‘a’ to ‘z’ are mapped to ‘A’ to ‘Z’, but non-ASCII letters are unchanged.
To uppercase the value in-place, use make_ascii_uppercase.
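A small sketch (this is [u8]::to_ascii_uppercase, which allocates a new Vec<u8>):
let s = b"hello, world!";
assert_eq!(s.to_ascii_uppercase(), b"HELLO, WORLD!".to_vec());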
Returns a vector containing a copy of this slice where each byte is mapped to its ASCII lower case equivalent.
ASCII letters ‘A’ to ‘Z’ are mapped to ‘a’ to ‘z’, but non-ASCII letters are unchanged.
To lowercase the value in-place, use make_ascii_lowercase.
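And the lowercase counterpart ([u8]::to_ascii_lowercase):
let s = b"HELLO, World!";
assert_eq!(s.to_ascii_lowercase(), b"hello, world!".to_vec());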
A unique reference to a contiguous slice of memory.
+BytesMut
represents a unique view into a potentially shared memory region.
+Given the uniqueness guarantee, owners of BytesMut
handles are able to
+mutate the memory.
BytesMut
can be thought of as containing a buf: Arc<Vec<u8>>
, an offset
+into buf
, a slice length, and a guarantee that no other BytesMut
for the
+same buf
overlaps with its slice. That guarantee means that a write lock
+is not required.
BytesMut
’s BufMut
implementation will implicitly grow its buffer as
+necessary. However, explicitly reserving the required space up-front before
+a series of inserts will be more efficient.
use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(64);
+
+buf.put_u8(b'h');
+buf.put_u8(b'e');
+buf.put(&b"llo"[..]);
+
+assert_eq!(&buf[..], b"hello");
+
+// Freeze the buffer so that it can be shared
+let a = buf.freeze();
+
+// This does not allocate, instead `b` points to the same memory.
+let b = a.clone();
+
+assert_eq!(&a[..], b"hello");
+assert_eq!(&b[..], b"hello");
Creates a new BytesMut
with the specified capacity.
The returned BytesMut
will be able to hold at least capacity
bytes
+without reallocating.
It is important to note that this function does not specify the length
+of the returned BytesMut
, but only the capacity.
use bytes::{BytesMut, BufMut};
+
+let mut bytes = BytesMut::with_capacity(64);
+
+// `bytes` contains no data, even though there is capacity
+assert_eq!(bytes.len(), 0);
+
+bytes.put(&b"hello world"[..]);
+
+assert_eq!(&bytes[..], b"hello world");
Creates a new BytesMut
with default capacity.
Resulting object has length 0 and unspecified capacity. +This function does not allocate.
+use bytes::{BytesMut, BufMut};
+
+let mut bytes = BytesMut::new();
+
+assert_eq!(0, bytes.len());
+
+bytes.reserve(2);
+bytes.put_slice(b"xy");
+
+assert_eq!(&b"xy"[..], &bytes[..]);
Returns the number of bytes contained in this BytesMut.
use bytes::BytesMut;
+
+let b = BytesMut::from(&b"hello"[..]);
+assert_eq!(b.len(), 5);
Returns true if the BytesMut has a length of 0.
use bytes::BytesMut;
+
+let b = BytesMut::with_capacity(64);
+assert!(b.is_empty());
Returns the number of bytes the BytesMut can hold without reallocating.
use bytes::BytesMut;
+
+let b = BytesMut::with_capacity(64);
+assert_eq!(b.capacity(), 64);
Converts self into an immutable Bytes.
The conversion is zero cost and is used to indicate that the slice referenced by the handle will no longer be mutated. Once the conversion is done, the handle can be cloned and shared across threads.
+use bytes::{BytesMut, BufMut};
+use std::thread;
+
+let mut b = BytesMut::with_capacity(64);
+b.put(&b"hello world"[..]);
+let b1 = b.freeze();
+let b2 = b1.clone();
+
+let th = thread::spawn(move || {
+ assert_eq!(&b1[..], b"hello world");
+});
+
+assert_eq!(&b2[..], b"hello world");
+th.join().unwrap();
Creates a new BytesMut, which is initialized with zero.
use bytes::BytesMut;
+
+let zeros = BytesMut::zeroed(42);
+
+assert_eq!(zeros.len(), 42);
+zeros.into_iter().for_each(|x| assert_eq!(x, 0));
Splits the bytes into two at the given index.
Afterwards self contains elements [0, at), and the returned BytesMut contains elements [at, capacity).
This is an O(1) operation that just increases the reference count and sets a few indices.
use bytes::BytesMut;
+
+let mut a = BytesMut::from(&b"hello world"[..]);
+let mut b = a.split_off(5);
+
+a[0] = b'j';
+b[0] = b'!';
+
+assert_eq!(&a[..], b"jello");
+assert_eq!(&b[..], b"!world");
Panics if at > capacity.
Removes the bytes from the current view, returning them in a new BytesMut handle.
Afterwards, self will be empty, but will retain any additional capacity that it had before the operation. This is identical to self.split_to(self.len()).
This is an O(1) operation that just increases the reference count and sets a few indices.
use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(1024);
+buf.put(&b"hello world"[..]);
+
+let other = buf.split();
+
+assert!(buf.is_empty());
+assert_eq!(1013, buf.capacity());
+
+assert_eq!(other, b"hello world"[..]);
Splits the buffer into two at the given index.
Afterwards self contains elements [at, len), and the returned BytesMut contains elements [0, at).
This is an O(1) operation that just increases the reference count and sets a few indices.
use bytes::BytesMut;
+
+let mut a = BytesMut::from(&b"hello world"[..]);
+let mut b = a.split_to(5);
+
+a[0] = b'!';
+b[0] = b'j';
+
+assert_eq!(&a[..], b"!world");
+assert_eq!(&b[..], b"jello");
Panics if at > len.
Shortens the buffer, keeping the first len bytes and dropping the rest.
If len is greater than the buffer’s current length, this has no effect.
Existing underlying capacity is preserved.
The split_off method can emulate truncate, but this causes the excess bytes to be returned instead of dropped.
use bytes::BytesMut;
+
+let mut buf = BytesMut::from(&b"hello world"[..]);
+buf.truncate(5);
+assert_eq!(buf, b"hello"[..]);
Clears the buffer, removing all data. Existing capacity is preserved.
+use bytes::BytesMut;
+
+let mut buf = BytesMut::from(&b"hello world"[..]);
+buf.clear();
+assert!(buf.is_empty());
Resizes the buffer so that len is equal to new_len.
If new_len is greater than len, the buffer is extended by the difference, with each additional byte set to value. If new_len is less than len, the buffer is simply truncated.
use bytes::BytesMut;
+
+let mut buf = BytesMut::new();
+
+buf.resize(3, 0x1);
+assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
+
+buf.resize(2, 0x2);
+assert_eq!(&buf[..], &[0x1, 0x1]);
+
+buf.resize(4, 0x3);
+assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
Sets the length of the buffer.
+This will explicitly set the size of the buffer without actually +modifying the data, so it is up to the caller to ensure that the data +has been initialized.
+use bytes::BytesMut;
+
+let mut b = BytesMut::from(&b"hello world"[..]);
+
+unsafe {
+ b.set_len(5);
+}
+
+assert_eq!(&b[..], b"hello");
+
+unsafe {
+ b.set_len(11);
+}
+
+assert_eq!(&b[..], b"hello world");
Reserves capacity for at least additional more bytes to be inserted into the given BytesMut.
More than additional bytes may be reserved in order to avoid frequent reallocations. A call to reserve may result in an allocation.
Before allocating new buffer space, the function will attempt to reclaim +space in the existing buffer. If the current handle references a view +into a larger original buffer, and all other handles referencing part +of the same original buffer have been dropped, then the current view +can be copied/shifted to the front of the buffer and the handle can take +ownership of the full buffer, provided that the full buffer is large +enough to fit the requested additional capacity.
+This optimization will only happen if shifting the data from the current +view to the front of the buffer is not too expensive in terms of the +(amortized) time required. The precise condition is subject to change; +as of now, the length of the data being shifted needs to be at least as +large as the distance that it’s shifted by. If the current view is empty +and the original buffer is large enough to fit the requested additional +capacity, then reallocations will never happen.
+In the following example, a new buffer is allocated.
+ +use bytes::BytesMut;
+
+let mut buf = BytesMut::from(&b"hello"[..]);
+buf.reserve(64);
+assert!(buf.capacity() >= 69);
In the following example, the existing buffer is reclaimed.
+ +use bytes::{BytesMut, BufMut};
+
+let mut buf = BytesMut::with_capacity(128);
+buf.put(&[0; 64][..]);
+
+let ptr = buf.as_ptr();
+let other = buf.split();
+
+assert!(buf.is_empty());
+assert_eq!(buf.capacity(), 64);
+
+drop(other);
+buf.reserve(128);
+
+assert_eq!(buf.capacity(), 128);
+assert_eq!(buf.as_ptr(), ptr);
Panics if the new capacity overflows usize.
Appends given bytes to this BytesMut.
If this BytesMut object does not have enough capacity, it is resized first.
use bytes::BytesMut;
+
+let mut buf = BytesMut::with_capacity(0);
+buf.extend_from_slice(b"aaabbb");
+buf.extend_from_slice(b"cccddd");
+
+assert_eq!(b"aaabbbcccddd", &buf[..]);
Absorbs a BytesMut that was previously split off.
If the two BytesMut objects were previously contiguous and not mutated in a way that causes re-allocation, i.e., if other was created by calling split_off on this BytesMut, then this is an O(1) operation that just decreases a reference count and sets a few indices.
+Otherwise this method degenerates to self.extend_from_slice(other.as_ref()).
use bytes::BytesMut;
+
+let mut buf = BytesMut::with_capacity(64);
+buf.extend_from_slice(b"aaabbbcccddd");
+
+let split = buf.split_off(6);
+assert_eq!(b"aaabbb", &buf[..]);
+assert_eq!(b"cccddd", &split[..]);
+
+buf.unsplit(split);
+assert_eq!(b"aaabbbcccddd", &buf[..]);
Returns the remaining spare capacity of the buffer as a slice of MaybeUninit<u8>.
The returned slice can be used to fill the buffer with data (e.g. by reading from a file) before marking the data as initialized using the set_len method.
use bytes::BytesMut;
+
+// Allocate buffer big enough for 10 bytes.
+let mut buf = BytesMut::with_capacity(10);
+
+// Fill in the first 3 elements.
+let uninit = buf.spare_capacity_mut();
+uninit[0].write(0);
+uninit[1].write(1);
+uninit[2].write(2);
+
+// Mark the first 3 bytes of the buffer as being initialized.
+unsafe {
+ buf.set_len(3);
+}
+
+assert_eq!(&buf[..], &[0, 1, 2]);
Checks if all bytes in this slice are within the ASCII range.
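An illustrative example (added for clarity; not part of the original docs):
let ascii = b"hello!\n";
let non_ascii = "Grüße!".as_bytes();
assert!(ascii.is_ascii());
assert!(!non_ascii.is_ascii());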
+Checks that two slices are an ASCII case-insensitive match.
+Same as to_ascii_lowercase(a) == to_ascii_lowercase(b), but without allocating and copying temporaries.
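An illustrative example (added for clarity; not part of the original docs):
assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
assert!(!b"Ferris".eq_ignore_ascii_case(b"tube"));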
Converts this slice to its ASCII upper case equivalent in-place.
+ASCII letters ‘a’ to ‘z’ are mapped to ‘A’ to ‘Z’, +but non-ASCII letters are unchanged.
+To return a new uppercased value without modifying the existing one, use to_ascii_uppercase.
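An illustrative example (added for clarity; not part of the original docs):
let mut bytes = b"hello, World!".to_vec();
bytes.make_ascii_uppercase();
assert_eq!(bytes, b"HELLO, WORLD!");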
Converts this slice to its ASCII lower case equivalent in-place.
+ASCII letters ‘A’ to ‘Z’ are mapped to ‘a’ to ‘z’, +but non-ASCII letters are unchanged.
+To return a new lowercased value without modifying the existing one, use to_ascii_lowercase.
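An illustrative example (added for clarity; not part of the original docs):
let mut bytes = b"HELLO, World!".to_vec();
bytes.make_ascii_lowercase();
assert_eq!(bytes, b"hello, world!");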
Returns an iterator that produces an escaped version of this slice, +treating it as an ASCII string.
+
+let s = b"0\t\r\n'\"\\\x9d";
+let escaped = s.escape_ascii().to_string();
+assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
This is a nightly-only experimental API (byte_slice_trim_ascii).
Returns a byte slice with leading ASCII whitespace bytes removed.
+‘Whitespace’ refers to the definition used by u8::is_ascii_whitespace.
#![feature(byte_slice_trim_ascii)]
+
+assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
+assert_eq!(b" ".trim_ascii_start(), b"");
+assert_eq!(b"".trim_ascii_start(), b"");
This is a nightly-only experimental API (byte_slice_trim_ascii).
Returns a byte slice with trailing ASCII whitespace bytes removed.
+‘Whitespace’ refers to the definition used by u8::is_ascii_whitespace.
#![feature(byte_slice_trim_ascii)]
+
+assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
+assert_eq!(b" ".trim_ascii_end(), b"");
+assert_eq!(b"".trim_ascii_end(), b"");
This is a nightly-only experimental API (byte_slice_trim_ascii).
Returns a byte slice with leading and trailing ASCII whitespace bytes removed.
+‘Whitespace’ refers to the definition used by u8::is_ascii_whitespace.
#![feature(byte_slice_trim_ascii)]
+
+assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
+assert_eq!(b" ".trim_ascii(), b"");
+assert_eq!(b"".trim_ascii(), b"");
This is a nightly-only experimental API (sort_floats).
Sorts the slice of floats.
+This sort is in-place (i.e. does not allocate), O(n * log(n)) worst-case, and uses the ordering defined by f64::total_cmp.
This uses the same sorting algorithm as sort_unstable_by.
#![feature(sort_floats)]
+let mut v = [2.6, -5e-8, f64::NAN, 8.29, f64::INFINITY, -1.0, 0.0, -f64::INFINITY, -0.0];
+
+v.sort_floats();
+let sorted = [-f64::INFINITY, -1.0, -5e-8, -0.0, 0.0, 2.6, 8.29, f64::INFINITY, f64::NAN];
+assert_eq!(&v[..8], &sorted[..8]);
+assert!(v[8].is_nan());
This is a nightly-only experimental API (sort_floats).
Sorts the slice of floats.
+This sort is in-place (i.e. does not allocate), O(n * log(n)) worst-case, and uses the ordering defined by f32::total_cmp.
This uses the same sorting algorithm as sort_unstable_by.
#![feature(sort_floats)]
+let mut v = [2.6, -5e-8, f32::NAN, 8.29, f32::INFINITY, -1.0, 0.0, -f32::INFINITY, -0.0];
+
+v.sort_floats();
+let sorted = [-f32::INFINITY, -1.0, -5e-8, -0.0, 0.0, 2.6, 8.29, f32::INFINITY, f32::NAN];
+assert_eq!(&v[..8], &sorted[..8]);
+assert!(v[8].is_nan());
Returns the number of elements in the slice.
+let a = [1, 2, 3];
+assert_eq!(a.len(), 3);
Returns true if the slice has a length of 0.
let a = [1, 2, 3];
+assert!(!a.is_empty());
Returns the first element of the slice, or None if it is empty.
let v = [10, 40, 30];
+assert_eq!(Some(&10), v.first());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.first());
Returns a mutable pointer to the first element of the slice, or None if it is empty.
let x = &mut [0, 1, 2];
+
+if let Some(first) = x.first_mut() {
+ *first = 5;
+}
+assert_eq!(x, &[5, 1, 2]);
Returns the first and all the rest of the elements of the slice, or None if it is empty.
let x = &[0, 1, 2];
+
+if let Some((first, elements)) = x.split_first() {
+ assert_eq!(first, &0);
+ assert_eq!(elements, &[1, 2]);
+}
Returns the first and all the rest of the elements of the slice, or None if it is empty.
let x = &mut [0, 1, 2];
+
+if let Some((first, elements)) = x.split_first_mut() {
+ *first = 3;
+ elements[0] = 4;
+ elements[1] = 5;
+}
+assert_eq!(x, &[3, 4, 5]);
Returns the last and all the rest of the elements of the slice, or None if it is empty.
let x = &[0, 1, 2];
+
+if let Some((last, elements)) = x.split_last() {
+ assert_eq!(last, &2);
+ assert_eq!(elements, &[0, 1]);
+}
Returns the last and all the rest of the elements of the slice, or None if it is empty.
let x = &mut [0, 1, 2];
+
+if let Some((last, elements)) = x.split_last_mut() {
+ *last = 3;
+ elements[0] = 4;
+ elements[1] = 5;
+}
+assert_eq!(x, &[4, 5, 3]);
Returns the last element of the slice, or None if it is empty.
let v = [10, 40, 30];
+assert_eq!(Some(&30), v.last());
+
+let w: &[i32] = &[];
+assert_eq!(None, w.last());
Returns a mutable pointer to the last item in the slice.
+let x = &mut [0, 1, 2];
+
+if let Some(last) = x.last_mut() {
+ *last = 10;
+}
+assert_eq!(x, &[0, 1, 10]);
Returns a reference to an element or subslice depending on the type of index.
+If given a position, returns a reference to the element at that position, or None if out of bounds.
+If given a range, returns the subslice corresponding to that range, or None if out of bounds.
let v = [10, 40, 30];
+assert_eq!(Some(&40), v.get(1));
+assert_eq!(Some(&[10, 40][..]), v.get(0..2));
+assert_eq!(None, v.get(3));
+assert_eq!(None, v.get(0..4));
Returns a reference to an element or subslice, without doing bounds checking.
+For a safe alternative see get.
Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used.
+let x = &[1, 2, 4];
+
+unsafe {
+ assert_eq!(x.get_unchecked(1), &2);
+}
Returns a mutable reference to an element or subslice, without doing bounds checking.
+For a safe alternative see get_mut.
Calling this method with an out-of-bounds index is undefined behavior +even if the resulting reference is not used.
+let x = &mut [1, 2, 4];
+
+unsafe {
+ let elem = x.get_unchecked_mut(1);
+ *elem = 13;
+}
+assert_eq!(x, &[1, 13, 4]);
Returns a raw pointer to the slice’s buffer.
+The caller must ensure that the slice outlives the pointer this +function returns, or else it will end up pointing to garbage.
+The caller must also ensure that the memory the pointer (non-transitively) points to is never written to (except inside an UnsafeCell) using this pointer or any pointer derived from it. If you need to mutate the contents of the slice, use as_mut_ptr.
Modifying the container referenced by this slice may cause its buffer +to be reallocated, which would also make any pointers to it invalid.
+let x = &[1, 2, 4];
+let x_ptr = x.as_ptr();
+
+unsafe {
+ for i in 0..x.len() {
+ assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
+ }
+}
Returns an unsafe mutable pointer to the slice’s buffer.
+The caller must ensure that the slice outlives the pointer this +function returns, or else it will end up pointing to garbage.
+Modifying the container referenced by this slice may cause its buffer +to be reallocated, which would also make any pointers to it invalid.
+let x = &mut [1, 2, 4];
+let x_ptr = x.as_mut_ptr();
+
+unsafe {
+ for i in 0..x.len() {
+ *x_ptr.add(i) += 2;
+ }
+}
+assert_eq!(x, &[3, 4, 6]);
Returns the two raw pointers spanning the slice.
+The returned range is half-open, which means that the end pointer +points one past the last element of the slice. This way, an empty +slice is represented by two equal pointers, and the difference between +the two pointers represents the size of the slice.
+See as_ptr for warnings on using these pointers. The end pointer requires extra caution, as it does not point to a valid element in the slice.
This function is useful for interacting with foreign interfaces which +use two pointers to refer to a range of elements in memory, as is +common in C++.
+It can also be useful to check if a pointer to an element refers to an +element of this slice:
+ +let a = [1, 2, 3];
+let x = &a[1] as *const _;
+let y = &5 as *const _;
+
+assert!(a.as_ptr_range().contains(&x));
+assert!(!a.as_ptr_range().contains(&y));
Returns the two unsafe mutable pointers spanning the slice.
+The returned range is half-open, which means that the end pointer +points one past the last element of the slice. This way, an empty +slice is represented by two equal pointers, and the difference between +the two pointers represents the size of the slice.
+See as_mut_ptr for warnings on using these pointers. The end pointer requires extra caution, as it does not point to a valid element in the slice.
This function is useful for interacting with foreign interfaces which +use two pointers to refer to a range of elements in memory, as is +common in C++.
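A minimal sketch (added for clarity; not part of the original docs) that doubles each element through the returned raw pointers:
let mut a = [1, 2, 3];
let range = a.as_mut_ptr_range();
// SAFETY: `start` and `end` both point into `a`, and iteration stops at `end`.
unsafe {
    let mut p = range.start;
    while p != range.end {
        *p *= 2;
        p = p.add(1);
    }
}
assert_eq!(a, [2, 4, 6]);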
+This is a nightly-only experimental API (slice_swap_unchecked).
Swaps two elements in the slice, without doing bounds checking.
+For a safe alternative see swap.
Calling this method with an out-of-bounds index is undefined behavior.
+The caller has to ensure that a < self.len() and b < self.len().
#![feature(slice_swap_unchecked)]
+
+let mut v = ["a", "b", "c", "d"];
+// SAFETY: we know that 1 and 3 are both indices of the slice
+unsafe { v.swap_unchecked(1, 3) };
+assert!(v == ["a", "d", "c", "b"]);
Reverses the order of elements in the slice, in place.
+let mut v = [1, 2, 3];
+v.reverse();
+assert!(v == [3, 2, 1]);
Returns an iterator over the slice.
+The iterator yields all items from start to end.
+let x = &[1, 2, 4];
+let mut iterator = x.iter();
+
+assert_eq!(iterator.next(), Some(&1));
+assert_eq!(iterator.next(), Some(&2));
+assert_eq!(iterator.next(), Some(&4));
+assert_eq!(iterator.next(), None);
Returns an iterator that allows modifying each value.
+The iterator yields all items from start to end.
+let x = &mut [1, 2, 4];
+for elem in x.iter_mut() {
+ *elem += 2;
+}
+assert_eq!(x, &[3, 4, 6]);
Returns an iterator over all contiguous windows of length size. The windows overlap. If the slice is shorter than size, the iterator returns no values.
Panics if size is 0.
let slice = ['r', 'u', 's', 't'];
+let mut iter = slice.windows(2);
+assert_eq!(iter.next().unwrap(), &['r', 'u']);
+assert_eq!(iter.next().unwrap(), &['u', 's']);
+assert_eq!(iter.next().unwrap(), &['s', 't']);
+assert!(iter.next().is_none());
If the slice is shorter than size:
let slice = ['f', 'o', 'o'];
+let mut iter = slice.windows(4);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size elements of the slice at a time, starting at the beginning of the slice.
The chunks are slices and do not overlap. If chunk_size does not divide the length of the slice, then the last chunk will not have length chunk_size.
See chunks_exact for a variant of this iterator that returns chunks of always exactly chunk_size elements, and rchunks for the same iterator but starting at the end of the slice.
Panics if chunk_size is 0.
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.chunks(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert_eq!(iter.next().unwrap(), &['m']);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size elements of the slice at a time, starting at the beginning of the slice.
The chunks are mutable slices, and do not overlap. If chunk_size does not divide the length of the slice, then the last chunk will not have length chunk_size.
See chunks_exact_mut for a variant of this iterator that returns chunks of always exactly chunk_size elements, and rchunks_mut for the same iterator but starting at the end of the slice.
Panics if chunk_size is 0.
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.chunks_mut(2) {
+ for elem in chunk.iter_mut() {
+ *elem += count;
+ }
+ count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 3]);
Returns an iterator over chunk_size elements of the slice at a time, starting at the beginning of the slice.
The chunks are slices and do not overlap. If chunk_size does not divide the length of the slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved from the remainder function of the iterator.
Due to each chunk having exactly chunk_size elements, the compiler can often optimize the resulting code better than in the case of chunks.
See chunks for a variant of this iterator that also returns the remainder as a smaller chunk, and rchunks_exact for the same iterator but starting at the end of the slice.
Panics if chunk_size is 0.
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.chunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
Returns an iterator over chunk_size elements of the slice at a time, starting at the beginning of the slice.
The chunks are mutable slices, and do not overlap. If chunk_size does not divide the length of the slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved from the into_remainder function of the iterator.
Due to each chunk having exactly chunk_size elements, the compiler can often optimize the resulting code better than in the case of chunks_mut.
See chunks_mut for a variant of this iterator that also returns the remainder as a smaller chunk, and rchunks_exact_mut for the same iterator but starting at the end of the slice.
Panics if chunk_size is 0.
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.chunks_exact_mut(2) {
+ for elem in chunk.iter_mut() {
+ *elem += count;
+ }
+ count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 0]);
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, assuming that there’s no remainder.
This may only be called when the slice splits exactly into N-element chunks (aka self.len() % N == 0), and N != 0.
#![feature(slice_as_chunks)]
+let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
+let chunks: &[[char; 1]] =
+ // SAFETY: 1-element chunks never have remainder
+ unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+let chunks: &[[char; 3]] =
+ // SAFETY: The slice length (6) is a multiple of 3
+ unsafe { slice.as_chunks_unchecked() };
+assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
+
+// These would be unsound:
+// let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
+// let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, starting at the beginning of the slice, and a remainder slice with length strictly less than N.
Panics if N is 0. This check will most probably get changed to a compile time error before this method gets stabilized.
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (chunks, remainder) = slice.as_chunks();
+assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
+assert_eq!(remainder, &['m']);
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, starting at the end of the slice, and a remainder slice with length strictly less than N.
Panics if N is 0. This check will most probably get changed to a compile time error before this method gets stabilized.
#![feature(slice_as_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let (remainder, chunks) = slice.as_rchunks();
+assert_eq!(remainder, &['l']);
+assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
This is a nightly-only experimental API (array_chunks).
Returns an iterator over N elements of the slice at a time, starting at the beginning of the slice.
The chunks are array references and do not overlap. If N does not divide the length of the slice, then the last up to N-1 elements will be omitted and can be retrieved from the remainder function of the iterator.
This method is the const generic equivalent of chunks_exact.
Panics if N is 0. This check will most probably get changed to a compile time error before this method gets stabilized.
#![feature(array_chunks)]
+let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.array_chunks();
+assert_eq!(iter.next().unwrap(), &['l', 'o']);
+assert_eq!(iter.next().unwrap(), &['r', 'e']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['m']);
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, assuming that there’s no remainder.
This may only be called when the slice splits exactly into N-element chunks (aka self.len() % N == 0), and N != 0.
#![feature(slice_as_chunks)]
+let slice: &mut [char] = &mut ['l', 'o', 'r', 'e', 'm', '!'];
+let chunks: &mut [[char; 1]] =
+ // SAFETY: 1-element chunks never have remainder
+ unsafe { slice.as_chunks_unchecked_mut() };
+chunks[0] = ['L'];
+assert_eq!(chunks, &[['L'], ['o'], ['r'], ['e'], ['m'], ['!']]);
+let chunks: &mut [[char; 3]] =
+ // SAFETY: The slice length (6) is a multiple of 3
+ unsafe { slice.as_chunks_unchecked_mut() };
+chunks[1] = ['a', 'x', '?'];
+assert_eq!(slice, &['L', 'o', 'r', 'a', 'x', '?']);
+
+// These would be unsound:
+// let chunks: &[[_; 5]] = slice.as_chunks_unchecked_mut() // The slice length is not a multiple of 5
+// let chunks: &[[_; 0]] = slice.as_chunks_unchecked_mut() // Zero-length chunks are never allowed
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, starting at the beginning of the slice, and a remainder slice with length strictly less than N.
Panics if N is 0. This check will most probably get changed to a compile time error before this method gets stabilized.
#![feature(slice_as_chunks)]
+let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+let (chunks, remainder) = v.as_chunks_mut();
+remainder[0] = 9;
+for chunk in chunks {
+ *chunk = [count; 2];
+ count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 9]);
This is a nightly-only experimental API (slice_as_chunks).
Splits the slice into a slice of N-element arrays, starting at the end of the slice, and a remainder slice with length strictly less than N.
Panics if N is 0. This check will most probably get changed to a compile time error before this method gets stabilized.
#![feature(slice_as_chunks)]
+let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+let (remainder, chunks) = v.as_rchunks_mut();
+remainder[0] = 9;
+for chunk in chunks {
+ *chunk = [count; 2];
+ count += 1;
+}
+assert_eq!(v, &[9, 1, 1, 2, 2]);
This is a nightly-only experimental API (array_chunks).
Returns an iterator over N elements of the slice at a time, starting at the beginning of the slice.
The chunks are mutable array references and do not overlap. If N does not divide the length of the slice, then the last up to N-1 elements will be omitted and can be retrieved from the into_remainder function of the iterator.
This method is the const generic equivalent of chunks_exact_mut.
Panics if N is 0. This check will most probably get changed to a compile time error before this method gets stabilized.
#![feature(array_chunks)]
+let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.array_chunks_mut() {
+ *chunk = [count; 2];
+ count += 1;
+}
+assert_eq!(v, &[1, 1, 2, 2, 0]);
This is a nightly-only experimental API (array_windows).
Returns an iterator over overlapping windows of N elements of a slice, starting at the beginning of the slice.
This is the const generic equivalent of windows.
If N is greater than the size of the slice, it will return no windows.
Panics if N is 0. This check will most probably get changed to a compile time error before this method gets stabilized.
#![feature(array_windows)]
+let slice = [0, 1, 2, 3];
+let mut iter = slice.array_windows();
+assert_eq!(iter.next().unwrap(), &[0, 1]);
+assert_eq!(iter.next().unwrap(), &[1, 2]);
+assert_eq!(iter.next().unwrap(), &[2, 3]);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size elements of the slice at a time, starting at the end of the slice.
The chunks are slices and do not overlap. If chunk_size does not divide the length of the slice, then the last chunk will not have length chunk_size.
See rchunks_exact for a variant of this iterator that returns chunks of always exactly chunk_size elements, and chunks for the same iterator but starting at the beginning of the slice.
Panics if chunk_size is 0.
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.rchunks(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert_eq!(iter.next().unwrap(), &['l']);
+assert!(iter.next().is_none());
Returns an iterator over chunk_size elements of the slice at a time, starting at the end of the slice.
The chunks are mutable slices, and do not overlap. If chunk_size does not divide the length of the slice, then the last chunk will not have length chunk_size.
See rchunks_exact_mut for a variant of this iterator that returns chunks of always exactly chunk_size elements, and chunks_mut for the same iterator but starting at the beginning of the slice.
Panics if chunk_size is 0.
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.rchunks_mut(2) {
+ for elem in chunk.iter_mut() {
+ *elem += count;
+ }
+ count += 1;
+}
+assert_eq!(v, &[3, 2, 2, 1, 1]);
Returns an iterator over chunk_size elements of the slice at a time, starting at the end of the slice.
The chunks are slices and do not overlap. If chunk_size does not divide the length of the slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved from the remainder function of the iterator.
Due to each chunk having exactly chunk_size elements, the compiler can often optimize the resulting code better than in the case of rchunks.
See rchunks for a variant of this iterator that also returns the remainder as a smaller chunk, and chunks_exact for the same iterator but starting at the beginning of the slice.
Panics if chunk_size is 0.
let slice = ['l', 'o', 'r', 'e', 'm'];
+let mut iter = slice.rchunks_exact(2);
+assert_eq!(iter.next().unwrap(), &['e', 'm']);
+assert_eq!(iter.next().unwrap(), &['o', 'r']);
+assert!(iter.next().is_none());
+assert_eq!(iter.remainder(), &['l']);
Returns an iterator over chunk_size elements of the slice at a time, starting at the end of the slice.
The chunks are mutable slices, and do not overlap. If chunk_size does not divide the length of the slice, then the last up to chunk_size-1 elements will be omitted and can be retrieved from the into_remainder function of the iterator.
Due to each chunk having exactly chunk_size elements, the compiler can often optimize the resulting code better than in the case of chunks_mut.
See rchunks_mut for a variant of this iterator that also returns the remainder as a smaller chunk, and chunks_exact_mut for the same iterator but starting at the beginning of the slice.
Panics if chunk_size is 0.
let v = &mut [0, 0, 0, 0, 0];
+let mut count = 1;
+
+for chunk in v.rchunks_exact_mut(2) {
+ for elem in chunk.iter_mut() {
+ *elem += count;
+ }
+ count += 1;
+}
+assert_eq!(v, &[0, 2, 2, 1, 1]);
This is a nightly-only experimental API (slice_group_by).
Returns an iterator over the slice producing non-overlapping runs of elements using the predicate to separate them.
+The predicate is called on pairs of consecutive elements: first on slice[0] and slice[1], then on slice[1] and slice[2], and so on.
#![feature(slice_group_by)]
+
+let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
+
+let mut iter = slice.group_by(|a, b| a == b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
+assert_eq!(iter.next(), Some(&[3, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
+assert_eq!(iter.next(), None);
This method can be used to extract the sorted subslices:
+ +#![feature(slice_group_by)]
+
+let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
+
+let mut iter = slice.group_by(|a, b| a <= b);
+
+assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3][..]));
+assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
+assert_eq!(iter.next(), None);
This is a nightly-only experimental API (slice_group_by).
Returns an iterator over the slice producing non-overlapping mutable runs of elements using the predicate to separate them.
+The predicate is called on pairs of consecutive elements: first on slice[0] and slice[1], then on slice[1] and slice[2], and so on.
#![feature(slice_group_by)]
+
+let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2];
+
+let mut iter = slice.group_by_mut(|a, b| a == b);
+
+assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
+assert_eq!(iter.next(), Some(&mut [3, 3][..]));
+assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
+assert_eq!(iter.next(), None);
This method can be used to extract the sorted subslices:
+ +#![feature(slice_group_by)]
+
+let slice = &mut [1, 1, 2, 3, 2, 3, 2, 3, 4];
+
+let mut iter = slice.group_by_mut(|a, b| a <= b);
+
+assert_eq!(iter.next(), Some(&mut [1, 1, 2, 3][..]));
+assert_eq!(iter.next(), Some(&mut [2, 3][..]));
+assert_eq!(iter.next(), Some(&mut [2, 3, 4][..]));
+assert_eq!(iter.next(), None);
Divides one slice into two at an index.
+The first will contain all indices from [0, mid) (excluding the index mid itself) and the second will contain all indices from [mid, len) (excluding the index len itself).
Panics if mid > len.
let v = [1, 2, 3, 4, 5, 6];
+
+{
+ let (left, right) = v.split_at(0);
+ assert_eq!(left, []);
+ assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_at(2);
+ assert_eq!(left, [1, 2]);
+ assert_eq!(right, [3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_at(6);
+ assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, []);
+}
Divides one mutable slice into two at an index.
+The first will contain all indices from [0, mid) (excluding the index mid itself) and the second will contain all indices from [mid, len) (excluding the index len itself).
Panics if mid > len.
let mut v = [1, 0, 3, 0, 5, 6];
+let (left, right) = v.split_at_mut(2);
+assert_eq!(left, [1, 0]);
+assert_eq!(right, [3, 0, 5, 6]);
+left[1] = 2;
+right[1] = 4;
+assert_eq!(v, [1, 2, 3, 4, 5, 6]);
This is a nightly-only experimental API (slice_split_at_unchecked).
Divides one slice into two at an index, without doing bounds checking.
+The first will contain all indices from [0, mid) (excluding the index mid itself) and the second will contain all indices from [mid, len) (excluding the index len itself).
For a safe alternative see split_at.
Calling this method with an out-of-bounds index is undefined behavior even if the resulting reference is not used. The caller has to ensure that 0 <= mid <= self.len().
#![feature(slice_split_at_unchecked)]
+
+let v = [1, 2, 3, 4, 5, 6];
+
+unsafe {
+ let (left, right) = v.split_at_unchecked(0);
+ assert_eq!(left, []);
+ assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+unsafe {
+ let (left, right) = v.split_at_unchecked(2);
+ assert_eq!(left, [1, 2]);
+ assert_eq!(right, [3, 4, 5, 6]);
+}
+
+unsafe {
+ let (left, right) = v.split_at_unchecked(6);
+ assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, []);
+}
This is a nightly-only experimental API (slice_split_at_unchecked).
Divides one mutable slice into two at an index, without doing bounds checking.
+The first will contain all indices from [0, mid) (excluding the index mid itself) and the second will contain all indices from [mid, len) (excluding the index len itself).
For a safe alternative see split_at_mut.
Calling this method with an out-of-bounds index is undefined behavior even if the resulting reference is not used. The caller has to ensure that 0 <= mid <= self.len().
#![feature(slice_split_at_unchecked)]
+
+let mut v = [1, 0, 3, 0, 5, 6];
+// scoped to restrict the lifetime of the borrows
+unsafe {
+ let (left, right) = v.split_at_mut_unchecked(2);
+ assert_eq!(left, [1, 0]);
+ assert_eq!(right, [3, 0, 5, 6]);
+ left[1] = 2;
+ right[1] = 4;
+}
+assert_eq!(v, [1, 2, 3, 4, 5, 6]);
This is a nightly-only experimental API (split_array).
Divides one slice into an array and a remainder slice at an index.
+The array will contain all indices from [0, N) (excluding the index N itself) and the slice will contain all indices from [N, len) (excluding the index len itself).
Panics if N > len.
#![feature(split_array)]
+
+let v = &[1, 2, 3, 4, 5, 6][..];
+
+{
+ let (left, right) = v.split_array_ref::<0>();
+ assert_eq!(left, &[]);
+ assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_array_ref::<2>();
+ assert_eq!(left, &[1, 2]);
+ assert_eq!(right, [3, 4, 5, 6]);
+}
+
+{
+ let (left, right) = v.split_array_ref::<6>();
+ assert_eq!(left, &[1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, []);
+}
This is a nightly-only experimental API (split_array).
Divides one mutable slice into an array and a remainder slice at an index.
+The array will contain all indices from [0, N) (excluding the index N itself) and the slice will contain all indices from [N, len) (excluding the index len itself).
Panics if N > len.
#![feature(split_array)]
+
+let mut v = &mut [1, 0, 3, 0, 5, 6][..];
+let (left, right) = v.split_array_mut::<2>();
+assert_eq!(left, &mut [1, 0]);
+assert_eq!(right, [3, 0, 5, 6]);
+left[1] = 2;
+right[1] = 4;
+assert_eq!(v, [1, 2, 3, 4, 5, 6]);
This is a nightly-only experimental API (split_array).
Divides one slice into an array and a remainder slice at an index from the end.
+The slice will contain all indices from [0, len - N) (excluding the index len - N itself) and the array will contain all indices from [len - N, len) (excluding the index len itself).
Panics if N > len.
#![feature(split_array)]
+
+let v = &[1, 2, 3, 4, 5, 6][..];
+
+{
+ let (left, right) = v.rsplit_array_ref::<0>();
+ assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(right, &[]);
+}
+
+{
+ let (left, right) = v.rsplit_array_ref::<2>();
+ assert_eq!(left, [1, 2, 3, 4]);
+ assert_eq!(right, &[5, 6]);
+}
+
+{
+ let (left, right) = v.rsplit_array_ref::<6>();
+ assert_eq!(left, []);
+ assert_eq!(right, &[1, 2, 3, 4, 5, 6]);
+}
This is a nightly-only experimental API (split_array).
Divides one mutable slice into an array and a remainder slice at an index from the end.
+The slice will contain all indices from [0, len - N) (excluding the index len - N itself) and the array will contain all indices from [len - N, len) (excluding the index len itself).
Panics if N > len.
#![feature(split_array)]
+
+let mut v = &mut [1, 0, 3, 0, 5, 6][..];
+let (left, right) = v.rsplit_array_mut::<4>();
+assert_eq!(left, [1, 0]);
+assert_eq!(right, &mut [3, 0, 5, 6]);
+left[1] = 2;
+right[1] = 4;
+assert_eq!(v, [1, 2, 3, 4, 5, 6]);
Returns an iterator over subslices separated by elements that match pred. The matched element is not contained in the subslices.
let slice = [10, 40, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
If the first element is matched, an empty slice will be the first item +returned by the iterator. Similarly, if the last element in the slice +is matched, an empty slice will be the last item returned by the +iterator:
+ +let slice = [10, 40, 33];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert!(iter.next().is_none());
If two matched elements are directly adjacent, an empty slice will be +present between them:
+ +let slice = [10, 6, 33, 20];
+let mut iter = slice.split(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10]);
+assert_eq!(iter.next().unwrap(), &[]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
Returns an iterator over mutable subslices separated by elements that match pred. The matched element is not contained in the subslices.
let mut v = [10, 40, 30, 20, 60, 50];
+
+for group in v.split_mut(|num| *num % 3 == 0) {
+ group[0] = 1;
+}
+assert_eq!(v, [1, 40, 30, 1, 60, 1]);
Returns an iterator over subslices separated by elements that match pred. The matched element is contained in the end of the previous subslice as a terminator.
let slice = [10, 40, 33, 20];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert_eq!(iter.next().unwrap(), &[20]);
+assert!(iter.next().is_none());
If the last element of the slice is matched, +that element will be considered the terminator of the preceding slice. +That slice will be the last item returned by the iterator.
+ +let slice = [3, 10, 40, 33];
+let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+
+assert_eq!(iter.next().unwrap(), &[3]);
+assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+assert!(iter.next().is_none());
Returns an iterator over mutable subslices separated by elements that match pred. The matched element is contained in the previous subslice as a terminator.
let mut v = [10, 40, 30, 20, 60, 50];
+
+for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
+ let terminator_idx = group.len()-1;
+ group[terminator_idx] = 1;
+}
+assert_eq!(v, [10, 40, 1, 20, 1, 1]);
Returns an iterator over subslices separated by elements that match pred, starting at the end of the slice and working backwards. The matched element is not contained in the subslices.
let slice = [11, 22, 33, 0, 44, 55];
+let mut iter = slice.rsplit(|num| *num == 0);
+
+assert_eq!(iter.next().unwrap(), &[44, 55]);
+assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
+assert_eq!(iter.next(), None);
As with split(), if the first or last element is matched, an empty slice will be the first (or last) item returned by the iterator.
let v = &[0, 1, 1, 2, 3, 5, 8];
+let mut it = v.rsplit(|n| *n % 2 == 0);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next().unwrap(), &[3, 5]);
+assert_eq!(it.next().unwrap(), &[1, 1]);
+assert_eq!(it.next().unwrap(), &[]);
+assert_eq!(it.next(), None);
Returns an iterator over mutable subslices separated by elements that match pred, starting at the end of the slice and working backwards. The matched element is not contained in the subslices.
let mut v = [100, 400, 300, 200, 600, 500];
+
+let mut count = 0;
+for group in v.rsplit_mut(|num| *num % 3 == 0) {
+ count += 1;
+ group[0] = count;
+}
+assert_eq!(v, [3, 400, 300, 2, 600, 1]);
Returns an iterator over subslices separated by elements that match pred, limited to returning at most n items. The matched element is not contained in the subslices.
The last element returned, if any, will contain the remainder of the +slice.
+Print the slice split once by numbers divisible by 3 (i.e., [10, 40], [20, 60, 50]):
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.splitn(2, |num| *num % 3 == 0) {
+ println!("{group:?}");
+}
Returns an iterator over mutable subslices separated by elements that match pred, limited to returning at most n items. The matched element is not contained in the subslices.
The last element returned, if any, will contain the remainder of the +slice.
+let mut v = [10, 40, 30, 20, 60, 50];
+
+for group in v.splitn_mut(2, |num| *num % 3 == 0) {
+ group[0] = 1;
+}
+assert_eq!(v, [1, 40, 30, 1, 60, 50]);
Returns an iterator over subslices separated by elements that match pred, limited to returning at most n items. This starts at the end of the slice and works backwards. The matched element is not contained in the subslices.
The last element returned, if any, will contain the remainder of the +slice.
+Print the slice split once, starting from the end, by numbers divisible by 3 (i.e., [50], [10, 40, 30, 20]):
let v = [10, 40, 30, 20, 60, 50];
+
+for group in v.rsplitn(2, |num| *num % 3 == 0) {
+ println!("{group:?}");
+}
Returns an iterator over subslices separated by elements that match pred, limited to returning at most n items. This starts at the end of the slice and works backwards. The matched element is not contained in the subslices.
The last element returned, if any, will contain the remainder of the +slice.
+let mut s = [10, 40, 30, 20, 60, 50];
+
+for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
+ group[0] = 1;
+}
+assert_eq!(s, [1, 40, 30, 20, 60, 1]);
Returns true if the slice contains an element with the given value.
This operation is O(n).
+Note that if you have a sorted slice, binary_search may be faster.
let v = [10, 40, 30];
+assert!(v.contains(&30));
+assert!(!v.contains(&50));
If you do not have a &T, but some other value that you can compare with one (for example, String implements PartialEq<str>), you can use iter().any:
let v = [String::from("hello"), String::from("world")]; // slice of `String`
+assert!(v.iter().any(|e| e == "hello")); // search with `&str`
+assert!(!v.iter().any(|e| e == "hi"));
Returns true if needle is a prefix of the slice.
let v = [10, 40, 30];
+assert!(v.starts_with(&[10]));
+assert!(v.starts_with(&[10, 40]));
+assert!(!v.starts_with(&[50]));
+assert!(!v.starts_with(&[10, 50]));
Always returns true if needle is an empty slice:
let v = &[10, 40, 30];
+assert!(v.starts_with(&[]));
+let v: &[u8] = &[];
+assert!(v.starts_with(&[]));
Returns true if needle is a suffix of the slice.
let v = [10, 40, 30];
+assert!(v.ends_with(&[30]));
+assert!(v.ends_with(&[40, 30]));
+assert!(!v.ends_with(&[50]));
+assert!(!v.ends_with(&[50, 30]));
Always returns true if needle is an empty slice:
let v = &[10, 40, 30];
+assert!(v.ends_with(&[]));
+let v: &[u8] = &[];
+assert!(v.ends_with(&[]));
Returns a subslice with the prefix removed.
+If the slice starts with prefix, returns the subslice after the prefix, wrapped in Some. If prefix is empty, simply returns the original slice.
If the slice does not start with prefix, returns None.
let v = &[10, 40, 30];
+assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
+assert_eq!(v.strip_prefix(&[50]), None);
+assert_eq!(v.strip_prefix(&[10, 50]), None);
+
+let prefix : &str = "he";
+assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
+ Some(b"llo".as_ref()));
Returns a subslice with the suffix removed.
+If the slice ends with suffix, returns the subslice before the suffix, wrapped in Some. If suffix is empty, simply returns the original slice.
If the slice does not end with suffix, returns None.
let v = &[10, 40, 30];
+assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
+assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
+assert_eq!(v.strip_suffix(&[50]), None);
+assert_eq!(v.strip_suffix(&[50, 30]), None);
Binary searches this slice for a given element.
+This behaves similarly to contains if this slice is sorted.
If the value is found then Result::Ok is returned, containing the index of the matching element. If there are multiple matches, then any one of the matches could be returned. The index is chosen deterministically, but is subject to change in future versions of Rust. If the value is not found then Result::Err is returned, containing the index where a matching element could be inserted while maintaining sorted order.
See also binary_search_by, binary_search_by_key, and partition_point.
Looks up a series of four elements. The first is found, with a uniquely determined position; the second and third are not found; the fourth could match any position in [1, 4].
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+assert_eq!(s.binary_search(&13), Ok(9));
+assert_eq!(s.binary_search(&4), Err(7));
+assert_eq!(s.binary_search(&100), Err(13));
+let r = s.binary_search(&1);
+assert!(match r { Ok(1..=4) => true, _ => false, });
If you want to find that whole range of matching items, rather than an arbitrary matching one, that can be done using partition_point:
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let low = s.partition_point(|x| x < &1);
+assert_eq!(low, 1);
+let high = s.partition_point(|x| x <= &1);
+assert_eq!(high, 5);
+let r = s.binary_search(&1);
+assert!((low..high).contains(&r.unwrap()));
+
+assert!(s[..low].iter().all(|&x| x < 1));
+assert!(s[low..high].iter().all(|&x| x == 1));
+assert!(s[high..].iter().all(|&x| x > 1));
+
+// For something not found, the "range" of equal items is empty
+assert_eq!(s.partition_point(|x| x < &11), 9);
+assert_eq!(s.partition_point(|x| x <= &11), 9);
+assert_eq!(s.binary_search(&11), Err(9));
If you want to insert an item to a sorted vector, while maintaining sort order, consider using partition_point:
let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x < num);
+// The above is equivalent to `let idx = s.binary_search(&num).unwrap_or_else(|x| x);`
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
Binary searches this slice with a comparator function.
+This behaves similarly to contains if this slice is sorted.
The comparator function should implement an order consistent with the sort order of the underlying slice, returning an order code that indicates whether its argument is Less, Equal or Greater than the desired target.
If the value is found then Result::Ok is returned, containing the index of the matching element. If there are multiple matches, then any one of the matches could be returned. The index is chosen deterministically, but is subject to change in future versions of Rust. If the value is not found then Result::Err is returned, containing the index where a matching element could be inserted while maintaining sorted order.
See also binary_search, binary_search_by_key, and partition_point.
Looks up a series of four elements. The first is found, with a uniquely determined position; the second and third are not found; the fourth could match any position in [1, 4].
let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+
+let seek = 13;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
+let seek = 4;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
+let seek = 100;
+assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
+let seek = 1;
+let r = s.binary_search_by(|probe| probe.cmp(&seek));
+assert!(match r { Ok(1..=4) => true, _ => false, });
Binary searches this slice with a key extraction function.
+This behaves similarly to contains if this slice is sorted.
Assumes that the slice is sorted by the key, for instance with sort_by_key using the same key extraction function.
If the value is found then Result::Ok is returned, containing the index of the matching element. If there are multiple matches, then any one of the matches could be returned. The index is chosen deterministically, but is subject to change in future versions of Rust. If the value is not found then Result::Err is returned, containing the index where a matching element could be inserted while maintaining sorted order.
See also binary_search, binary_search_by, and partition_point.
Looks up a series of four elements in a slice of pairs sorted by their second elements. The first is found, with a uniquely determined position; the second and third are not found; the fourth could match any position in [1, 4].
let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
+ (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+ (1, 21), (2, 34), (4, 55)];
+
+assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
+assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7));
+assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
+let r = s.binary_search_by_key(&1, |&(a, b)| b);
+assert!(match r { Ok(1..=4) => true, _ => false, });
Sorts the slice, but might not preserve the order of equal elements.
+This sort is unstable (i.e., may reorder equal elements), in-place +(i.e., does not allocate), and O(n * log(n)) worst-case.
+The current algorithm is based on pattern-defeating quicksort by Orson Peters, +which combines the fast average case of randomized quicksort with the fast worst case of +heapsort, while achieving linear time on slices with certain patterns. It uses some +randomization to avoid degenerate cases, but with a fixed seed to always provide +deterministic behavior.
+It is typically faster than stable sorting, except in a few special cases, e.g., when the +slice consists of several concatenated sorted sequences.
+let mut v = [-5, 4, 1, -3, 2];
+
+v.sort_unstable();
+assert!(v == [-5, -3, 1, 2, 4]);
Sorts the slice with a comparator function, but might not preserve the order of equal +elements.
+This sort is unstable (i.e., may reorder equal elements), in-place +(i.e., does not allocate), and O(n * log(n)) worst-case.
+The comparator function must define a total ordering for the elements in the slice. If the ordering is not total, the order of the elements is unspecified. An order is a total order if it is (for all a, b and c):
total and antisymmetric: exactly one of a < b, a == b or a > b is true, and
transitive, a < b and b < c implies a < c. The same must hold for both == and >.
For example, while f64 doesn’t implement Ord because NaN != NaN, we can use partial_cmp as our sort function when we know the slice doesn’t contain a NaN.
let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
+assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
The current algorithm is based on pattern-defeating quicksort by Orson Peters, +which combines the fast average case of randomized quicksort with the fast worst case of +heapsort, while achieving linear time on slices with certain patterns. It uses some +randomization to avoid degenerate cases, but with a fixed seed to always provide +deterministic behavior.
+It is typically faster than stable sorting, except in a few special cases, e.g., when the +slice consists of several concatenated sorted sequences.
+let mut v = [5, 4, 1, 3, 2];
+v.sort_unstable_by(|a, b| a.cmp(b));
+assert!(v == [1, 2, 3, 4, 5]);
+
+// reverse sorting
+v.sort_unstable_by(|a, b| b.cmp(a));
+assert!(v == [5, 4, 3, 2, 1]);
Sorts the slice with a key extraction function, but might not preserve the order of equal +elements.
+This sort is unstable (i.e., may reorder equal elements), in-place +(i.e., does not allocate), and O(m * n * log(n)) worst-case, where the key function is +O(m).
+The current algorithm is based on pattern-defeating quicksort by Orson Peters, +which combines the fast average case of randomized quicksort with the fast worst case of +heapsort, while achieving linear time on slices with certain patterns. It uses some +randomization to avoid degenerate cases, but with a fixed seed to always provide +deterministic behavior.
+Due to its key calling strategy, sort_unstable_by_key is likely to be slower than sort_by_cached_key in cases where the key function is expensive.
let mut v = [-5i32, 4, 1, -3, 2];
+
+v.sort_unstable_by_key(|k| k.abs());
+assert!(v == [1, 2, -3, 4, -5]);
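For comparison, a minimal sketch (added for clarity; not part of the original docs) of the cached-key variant mentioned above; sort_by_cached_key computes each key at most once per element:
let mut v = ["hello", "a", "longer string"];
v.sort_by_cached_key(|s| s.len());
assert_eq!(v, ["a", "hello", "longer string"]);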
Reorder the slice such that the element at index is at its final sorted position.
This reordering has the additional property that any value at position i < index will be less than or equal to any value at a position j > index. Additionally, this reordering is unstable (i.e. any number of equal elements may end up at position index), in-place (i.e. does not allocate), and O(n) worst-case. This function is also known as “kth element” in other libraries. It returns a triplet of the following from the reordered slice: the subslice prior to index, the element at index, and the subslice after index; accordingly, the values in those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to the value of the element at index.
The current algorithm is based on the quickselect portion of the same quicksort algorithm used for sort_unstable.
Panics when index >= len(), meaning it always panics on empty slices.
let mut v = [-5i32, 4, 1, -3, 2];
+
+// Find the median
+v.select_nth_unstable(2);
+
+// We are only guaranteed the slice will be one of the following, based on the way we sort
+// about the specified index.
+assert!(v == [-3, -5, 1, 2, 4] ||
+ v == [-5, -3, 1, 2, 4] ||
+ v == [-3, -5, 1, 4, 2] ||
+ v == [-5, -3, 1, 4, 2]);
Reorder the slice with a comparator function such that the element at index is at its final sorted position.
This reordering has the additional property that any value at position i < index will be less than or equal to any value at a position j > index using the comparator function. Additionally, this reordering is unstable (i.e. any number of equal elements may end up at position index), in-place (i.e. does not allocate), and O(n) worst-case. This function is also known as “kth element” in other libraries. It returns a triplet of the following from the slice reordered according to the provided comparator function: the subslice prior to index, the element at index, and the subslice after index; accordingly, the values in those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to the value of the element at index.
The current algorithm is based on the quickselect portion of the same quicksort algorithm used for sort_unstable.
Panics when index >= len(), meaning it always panics on empty slices.
let mut v = [-5i32, 4, 1, -3, 2];
+
+// Find the median as if the slice were sorted in descending order.
+v.select_nth_unstable_by(2, |a, b| b.cmp(a));
+
+// We are only guaranteed the slice will be one of the following, based on the way we sort
+// about the specified index.
+assert!(v == [2, 4, 1, -5, -3] ||
+ v == [2, 4, 1, -3, -5] ||
+ v == [4, 2, 1, -5, -3] ||
+ v == [4, 2, 1, -3, -5]);
Reorder the slice with a key extraction function such that the element at index is at its final sorted position.
This reordering has the additional property that any value at position i < index will be less than or equal to any value at a position j > index using the key extraction function. Additionally, this reordering is unstable (i.e. any number of equal elements may end up at position index), in-place (i.e. does not allocate), and O(n) worst-case. This function is also known as “kth element” in other libraries. It returns a triplet of the following from the slice reordered according to the provided key extraction function: the subslice prior to index, the element at index, and the subslice after index; accordingly, the values in those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to the value of the element at index.
The current algorithm is based on the quickselect portion of the same quicksort algorithm used for sort_unstable.
Panics when index >= len(), meaning it always panics on empty slices.
let mut v = [-5i32, 4, 1, -3, 2];
+
+// Return the median as if the array were sorted according to absolute value.
+v.select_nth_unstable_by_key(2, |a| a.abs());
+
+// We are only guaranteed the slice will be one of the following, based on the way we sort
+// about the specified index.
+assert!(v == [1, 2, -3, 4, -5] ||
+ v == [1, 2, -3, -5, 4] ||
+ v == [2, 1, -3, 4, -5] ||
+ v == [2, 1, -3, -5, 4]);
This is a nightly-only experimental API (slice_partition_dedup).
Moves all consecutive repeated elements to the end of the slice according to the PartialEq trait implementation.
Returns two slices. The first contains no consecutive repeated elements. +The second contains all the duplicates in no specified order.
+If the slice is sorted, the first returned slice contains no duplicates.
+#![feature(slice_partition_dedup)]
+
+let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
+
+let (dedup, duplicates) = slice.partition_dedup();
+
+assert_eq!(dedup, [1, 2, 3, 2, 1]);
+assert_eq!(duplicates, [2, 3, 1]);
This is a nightly-only experimental API (slice_partition_dedup).
Moves all but the first of consecutive elements to the end of the slice satisfying a given equality relation.
Returns two slices. The first contains no consecutive repeated elements. The second contains all the duplicates in no specified order.
+The same_bucket function is passed references to two elements from the slice and must determine if the elements compare equal. The elements are passed in opposite order from their order in the slice, so if same_bucket(a, b) returns true, a is moved to the end of the slice.
If the slice is sorted, the first returned slice contains no duplicates.
+#![feature(slice_partition_dedup)]
+
+let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
+
+let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+
+assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
+assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
This is a nightly-only experimental API (slice_partition_dedup).
Moves all but the first of consecutive elements to the end of the slice that resolve to the same key.
+Returns two slices. The first contains no consecutive repeated elements. +The second contains all the duplicates in no specified order.
+If the slice is sorted, the first returned slice contains no duplicates.
+#![feature(slice_partition_dedup)]
+
+let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
+
+let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
+
+assert_eq!(dedup, [10, 20, 30, 20, 11]);
+assert_eq!(duplicates, [21, 30, 13]);
Rotates the slice in-place such that the first mid elements of the slice move to the end while the last self.len() - mid elements move to the front. After calling rotate_left, the element previously at index mid will become the first element in the slice.
This function will panic if mid is greater than the length of the slice. Note that mid == self.len() does not panic and is a no-op rotation.
Takes linear (in self.len()) time.
let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a.rotate_left(2);
+assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
Rotating a subslice:
+ +let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a[1..5].rotate_left(1);
+assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
Rotates the slice in-place such that the first self.len() - k elements of the slice move to the end while the last k elements move to the front. After calling rotate_right, the element previously at index self.len() - k will become the first element in the slice.
This function will panic if k is greater than the length of the slice. Note that k == self.len() does not panic and is a no-op rotation.
Takes linear (in self.len()) time.
let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a.rotate_right(2);
+assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
Rotate a subslice:
+ +let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+a[1..5].rotate_right(1);
+assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
Fills self
with elements by cloning value
.
let mut buf = vec![0; 10];
+buf.fill(1);
+assert_eq!(buf, vec![1; 10]);
Fills self
with elements returned by calling a closure repeatedly.
This method uses a closure to create new values. If you’d rather
+Clone
a given value, use fill
. If you want to use the Default
+trait to generate values, you can pass Default::default
as the
+argument.
let mut buf = vec![1; 10];
+buf.fill_with(Default::default);
+assert_eq!(buf, vec![0; 10]);
Copies the elements from src
into self
.
The length of src
must be the same as self
.
This function will panic if the two slices have different lengths.
+Cloning two elements from a slice into another:
+ +let src = [1, 2, 3, 4];
+let mut dst = [0, 0];
+
+// Because the slices have to be the same length,
+// we slice the source slice from four elements
+// to two. It will panic if we don't do this.
+dst.clone_from_slice(&src[2..]);
+
+assert_eq!(src, [1, 2, 3, 4]);
+assert_eq!(dst, [3, 4]);
Rust enforces that there can only be one mutable reference with no
+immutable references to a particular piece of data in a particular
+scope. Because of this, attempting to use clone_from_slice
on a
+single slice will result in a compile failure:
let mut slice = [1, 2, 3, 4, 5];
+
+slice[..2].clone_from_slice(&slice[3..]); // compile fail!
To work around this, we can use split_at_mut
to create two distinct
+sub-slices from a slice:
let mut slice = [1, 2, 3, 4, 5];
+
+{
+ let (left, right) = slice.split_at_mut(2);
+ left.clone_from_slice(&right[1..]);
+}
+
+assert_eq!(slice, [4, 5, 3, 4, 5]);
Copies all elements from src
into self
, using a memcpy.
The length of src
must be the same as self
.
If T
does not implement Copy
, use clone_from_slice
.
This function will panic if the two slices have different lengths.
+Copying two elements from a slice into another:
+ +let src = [1, 2, 3, 4];
+let mut dst = [0, 0];
+
+// Because the slices have to be the same length,
+// we slice the source slice from four elements
+// to two. It will panic if we don't do this.
+dst.copy_from_slice(&src[2..]);
+
+assert_eq!(src, [1, 2, 3, 4]);
+assert_eq!(dst, [3, 4]);
Rust enforces that there can only be one mutable reference with no
+immutable references to a particular piece of data in a particular
+scope. Because of this, attempting to use copy_from_slice
on a
+single slice will result in a compile failure:
let mut slice = [1, 2, 3, 4, 5];
+
+slice[..2].copy_from_slice(&slice[3..]); // compile fail!
To work around this, we can use split_at_mut
to create two distinct
+sub-slices from a slice:
let mut slice = [1, 2, 3, 4, 5];
+
+{
+ let (left, right) = slice.split_at_mut(2);
+ left.copy_from_slice(&right[1..]);
+}
+
+assert_eq!(slice, [4, 5, 3, 4, 5]);
Copies elements from one part of the slice to another part of itself, +using a memmove.
+src
is the range within self
to copy from. dest
is the starting
+index of the range within self
to copy to, which will have the same
+length as src
. The two ranges may overlap. The ends of the two ranges
+must be less than or equal to self.len()
.
This function will panic if either range exceeds the end of the slice,
+or if the end of src
is before the start.
Copying four bytes within a slice:
+ +let mut bytes = *b"Hello, World!";
+
+bytes.copy_within(1..5, 8);
+
+assert_eq!(&bytes, b"Hello, Wello!");
Swaps all elements in self
with those in other
.
The length of other
must be the same as self
.
This function will panic if the two slices have different lengths.
+Swapping two elements across slices:
+ +let mut slice1 = [0, 0];
+let mut slice2 = [1, 2, 3, 4];
+
+slice1.swap_with_slice(&mut slice2[2..]);
+
+assert_eq!(slice1, [3, 4]);
+assert_eq!(slice2, [1, 2, 0, 0]);
Rust enforces that there can only be one mutable reference to a
+particular piece of data in a particular scope. Because of this,
+attempting to use swap_with_slice
on a single slice will result in
+a compile failure:
let mut slice = [1, 2, 3, 4, 5];
+slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
To work around this, we can use split_at_mut
to create two distinct
+mutable sub-slices from a slice:
let mut slice = [1, 2, 3, 4, 5];
+
+{
+ let (left, right) = slice.split_at_mut(2);
+ left.swap_with_slice(&mut right[1..]);
+}
+
+assert_eq!(slice, [4, 5, 3, 1, 2]);
Transmute the slice to a slice of another type, ensuring alignment of the types is +maintained.
+This method splits the slice into three distinct slices: prefix, correctly aligned middle +slice of a new type, and the suffix slice. The method may make the middle slice the greatest +length possible for a given type and input slice, but only your algorithm’s performance +should depend on that, not its correctness. It is permissible for all of the input data to +be returned as the prefix or suffix slice.
+This method has no purpose when either input element T
or output element U
are
+zero-sized and will return the original slice without splitting anything.
This method is essentially a transmute
with respect to the elements in the returned
+middle slice, so all the usual caveats pertaining to transmute::<T, U>
also apply here.
Basic usage:
+ +unsafe {
+ let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ let (prefix, shorts, suffix) = bytes.align_to::<u16>();
+ // less_efficient_algorithm_for_bytes(prefix);
+ // more_efficient_algorithm_for_aligned_shorts(shorts);
+ // less_efficient_algorithm_for_bytes(suffix);
+}
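Whatever split is chosen, the three pieces always cover the whole input; a quick
sketch of that length invariant:

let bytes = [0u8; 7];
let (prefix, shorts, suffix) = unsafe { bytes.align_to::<u16>() };
// Each u16 in the middle accounts for two input bytes.
assert_eq!(prefix.len() + shorts.len() * 2 + suffix.len(), bytes.len());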
Transmute the mutable slice to a mutable slice of another type, ensuring alignment of the +types is maintained.
+This method splits the slice into three distinct slices: prefix, correctly aligned middle +slice of a new type, and the suffix slice. The method may make the middle slice the greatest +length possible for a given type and input slice, but only your algorithm’s performance +should depend on that, not its correctness. It is permissible for all of the input data to +be returned as the prefix or suffix slice.
+This method has no purpose when either input element T
or output element U
are
+zero-sized and will return the original slice without splitting anything.
This method is essentially a transmute
with respect to the elements in the returned
+middle slice, so all the usual caveats pertaining to transmute::<T, U>
also apply here.
Basic usage:
+ +unsafe {
+ let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
+ // less_efficient_algorithm_for_bytes(prefix);
+ // more_efficient_algorithm_for_aligned_shorts(shorts);
+ // less_efficient_algorithm_for_bytes(suffix);
+}
This is a nightly-only experimental API. (portable_simd)
Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
+This is a safe wrapper around slice::align_to
, so has the same weak
+postconditions as that method. You’re only assured that
+self.len() == prefix.len() + middle.len() * LANES + suffix.len()
.
Notably, all of the following are possible:
prefix.len() >= LANES.
middle.is_empty() despite self.len() >= 3 * LANES.
suffix.len() >= LANES.
That said, this is a safe method, so if you’re only writing safe code,
then this can at most cause incorrect logic, not unsoundness.
+This will panic if the size of the SIMD type is different from
+LANES
times that of the scalar.
At the time of writing, the trait restrictions on Simd<T, LANES> keep
that from ever happening, as only power-of-two numbers of lanes are
supported. It’s possible that, in the future, those restrictions might
be lifted in a way that would make it possible to see panics from this
method for something like LANES == 3.
#![feature(portable_simd)]
+use core::simd::SimdFloat;
+
+let short = &[1, 2, 3];
+let (prefix, middle, suffix) = short.as_simd::<4>();
+assert_eq!(middle, []); // Not enough elements for anything in the middle
+
+// They might be split in any possible way between prefix and suffix
+let it = prefix.iter().chain(suffix).copied();
+assert_eq!(it.collect::<Vec<_>>(), vec![1, 2, 3]);
+
+fn basic_simd_sum(x: &[f32]) -> f32 {
+ use std::ops::Add;
+ use std::simd::f32x4;
+ let (prefix, middle, suffix) = x.as_simd();
+ let sums = f32x4::from_array([
+ prefix.iter().copied().sum(),
+ 0.0,
+ 0.0,
+ suffix.iter().copied().sum(),
+ ]);
+ let sums = middle.iter().copied().fold(sums, f32x4::add);
+ sums.reduce_sum()
+}
+
+let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();
+assert_eq!(basic_simd_sum(&numbers[1..99]), 4949.0);
This is a nightly-only experimental API. (portable_simd)
Split a mutable slice into a mutable prefix, a middle of aligned SIMD types,
and a mutable suffix.
+This is a safe wrapper around slice::align_to_mut
, so has the same weak
+postconditions as that method. You’re only assured that
+self.len() == prefix.len() + middle.len() * LANES + suffix.len()
.
Notably, all of the following are possible:
prefix.len() >= LANES.
middle.is_empty() despite self.len() >= 3 * LANES.
suffix.len() >= LANES.
That said, this is a safe method, so if you’re only writing safe code,
then this can at most cause incorrect logic, not unsoundness.
+This is the mutable version of slice::as_simd
; see that for examples.
This will panic if the size of the SIMD type is different from
+LANES
times that of the scalar.
At the time of writing, the trait restrictions on Simd<T, LANES> keep
that from ever happening, as only power-of-two numbers of lanes are
supported. It’s possible that, in the future, those restrictions might
be lifted in a way that would make it possible to see panics from this
method for something like LANES == 3.
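Since the examples live on slice::as_simd, here is a minimal sketch of the mutable
variant (scale_in_place is a hypothetical helper; nightly portable_simd assumed):

#![feature(portable_simd)]
use std::simd::f32x4;

// Hypothetical helper: scale every element, using SIMD for the aligned middle.
fn scale_in_place(x: &mut [f32], factor: f32) {
    let (prefix, middle, suffix) = x.as_simd_mut::<4>();
    let v = f32x4::splat(factor);
    for e in prefix.iter_mut().chain(suffix.iter_mut()) {
        *e *= factor;
    }
    for lane in middle.iter_mut() {
        *lane *= v;
    }
}

let mut data = vec![1.0f32; 20];
scale_in_place(&mut data, 3.0);
assert!(data.iter().all(|&x| x == 3.0));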
This is a nightly-only experimental API. (is_sorted)
Checks if the elements of this slice are sorted.
+That is, for each element a
and its following element b
, a <= b
must hold. If the
+slice yields exactly zero or one element, true
is returned.
Note that if Self::Item
is only PartialOrd
, but not Ord
, the above definition
+implies that this function returns false
if any two consecutive items are not
+comparable.
#![feature(is_sorted)]
+let empty: [i32; 0] = [];
+
+assert!([1, 2, 2, 9].is_sorted());
+assert!(![1, 3, 2, 4].is_sorted());
+assert!([0].is_sorted());
+assert!(empty.is_sorted());
+assert!(![0.0, 1.0, f32::NAN].is_sorted());
This is a nightly-only experimental API. (is_sorted)
Checks if the elements of this slice are sorted using the given comparator function.
+Instead of using PartialOrd::partial_cmp
, this function uses the given compare
+function to determine the ordering of two elements. Apart from that, it’s equivalent to
+is_sorted
; see its documentation for more information.
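A brief sketch (assuming the nightly is_sorted feature, in which the comparator
returns Option<Ordering>):

#![feature(is_sorted)]

assert!([1, 2, 2, 9].is_sorted_by(|a, b| a.partial_cmp(b)));
assert!(![1, 3, 2, 4].is_sorted_by(|a, b| a.partial_cmp(b)));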
This is a nightly-only experimental API. (is_sorted)
Checks if the elements of this slice are sorted using the given key extraction function.
+Instead of comparing the slice’s elements directly, this function compares the keys of the
+elements, as determined by f
. Apart from that, it’s equivalent to is_sorted
; see its
+documentation for more information.
#![feature(is_sorted)]
+
+assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
+assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
Returns the index of the partition point according to the given predicate +(the index of the first element of the second partition).
The slice is assumed to be partitioned according to the given predicate.
This means that all elements for which the predicate returns true are at the start of the slice
and all elements for which the predicate returns false are at the end.
For example, [7, 15, 3, 5, 4, 12, 6] is partitioned under the predicate x % 2 != 0
(all odd numbers are at the start, all even at the end).
+If this slice is not partitioned, the returned result is unspecified and meaningless, +as this method performs a kind of binary search.
+See also binary_search
, binary_search_by
, and binary_search_by_key
.
let v = [1, 2, 3, 3, 5, 6, 7];
+let i = v.partition_point(|&x| x < 5);
+
+assert_eq!(i, 4);
+assert!(v[..i].iter().all(|&x| x < 5));
+assert!(v[i..].iter().all(|&x| !(x < 5)));
If all elements of the slice match the predicate, including if the slice +is empty, then the length of the slice will be returned:
+ +let a = [2, 4, 8];
+assert_eq!(a.partition_point(|x| x < &100), a.len());
+let a: [i32; 0] = [];
+assert_eq!(a.partition_point(|x| x < &100), 0);
If you want to insert an item to a sorted vector, while maintaining +sort order:
+ +let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+let num = 42;
+let idx = s.partition_point(|&x| x < num);
+s.insert(idx, num);
+assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
This is a nightly-only experimental API. (slice_take)
Removes the subslice corresponding to the given range
and returns a reference to it.
+Returns None
and does not modify the slice if the given
+range is out of bounds.
Note that this method only accepts one-sided ranges such as
+2..
or ..6
, but not 2..6
.
Taking the first three elements of a slice:
+ +#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+let mut first_three = slice.take(..3).unwrap();
+
+assert_eq!(slice, &['d']);
+assert_eq!(first_three, &['a', 'b', 'c']);
Taking the last two elements of a slice:
+ +#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+let mut tail = slice.take(2..).unwrap();
+
+assert_eq!(slice, &['a', 'b']);
+assert_eq!(tail, &['c', 'd']);
Getting None
when range
is out of bounds:
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c', 'd'];
+
+assert_eq!(None, slice.take(5..));
+assert_eq!(None, slice.take(..5));
+assert_eq!(None, slice.take(..=4));
+let expected: &[char] = &['a', 'b', 'c', 'd'];
+assert_eq!(Some(expected), slice.take(..4));
This is a nightly-only experimental API. (slice_take)
Removes the subslice corresponding to the given range
and returns a mutable reference to it.
+Returns None
and does not modify the slice if the given
+range is out of bounds.
Note that this method only accepts one-sided ranges such as
+2..
or ..6
, but not 2..6
.
Taking the first three elements of a slice:
+ +#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+let mut first_three = slice.take_mut(..3).unwrap();
+
+assert_eq!(slice, &mut ['d']);
+assert_eq!(first_three, &mut ['a', 'b', 'c']);
Taking the last two elements of a slice:
+ +#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+let mut tail = slice.take_mut(2..).unwrap();
+
+assert_eq!(slice, &mut ['a', 'b']);
+assert_eq!(tail, &mut ['c', 'd']);
Getting None
when range
is out of bounds:
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+
+assert_eq!(None, slice.take_mut(5..));
+assert_eq!(None, slice.take_mut(..5));
+assert_eq!(None, slice.take_mut(..=4));
+let expected: &mut [_] = &mut ['a', 'b', 'c', 'd'];
+assert_eq!(Some(expected), slice.take_mut(..4));
This is a nightly-only experimental API. (slice_take)
Removes the first element of the slice and returns a reference
to it.
+Returns None
if the slice is empty.
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c'];
+let first = slice.take_first().unwrap();
+
+assert_eq!(slice, &['b', 'c']);
+assert_eq!(first, &'a');
This is a nightly-only experimental API. (slice_take)
Removes the first element of the slice and returns a mutable
reference to it.
+Returns None
if the slice is empty.
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c'];
+let first = slice.take_first_mut().unwrap();
+*first = 'd';
+
+assert_eq!(slice, &['b', 'c']);
+assert_eq!(first, &'d');
This is a nightly-only experimental API. (slice_take)
Removes the last element of the slice and returns a reference
to it.
+Returns None
if the slice is empty.
#![feature(slice_take)]
+
+let mut slice: &[_] = &['a', 'b', 'c'];
+let last = slice.take_last().unwrap();
+
+assert_eq!(slice, &['a', 'b']);
+assert_eq!(last, &'c');
This is a nightly-only experimental API. (slice_take)
Removes the last element of the slice and returns a mutable
reference to it.
+Returns None
if the slice is empty.
#![feature(slice_take)]
+
+let mut slice: &mut [_] = &mut ['a', 'b', 'c'];
+let last = slice.take_last_mut().unwrap();
+*last = 'd';
+
+assert_eq!(slice, &['a', 'b']);
+assert_eq!(last, &'d');
This is a nightly-only experimental API. (slice_flatten)
Takes a &[[T; N]], and flattens it to a &[T].
This panics if the length of the resulting slice would overflow a usize.
This is only possible when flattening a slice of arrays of zero-sized
+types, and thus tends to be irrelevant in practice. If
+size_of::<T>() > 0
, this will never panic.
#![feature(slice_flatten)]
+
+assert_eq!([[1, 2, 3], [4, 5, 6]].flatten(), &[1, 2, 3, 4, 5, 6]);
+
+assert_eq!(
+ [[1, 2, 3], [4, 5, 6]].flatten(),
+ [[1, 2], [3, 4], [5, 6]].flatten(),
+);
+
+let slice_of_empty_arrays: &[[i32; 0]] = &[[], [], [], [], []];
+assert!(slice_of_empty_arrays.flatten().is_empty());
+
+let empty_slice_of_arrays: &[[u32; 10]] = &[];
+assert!(empty_slice_of_arrays.flatten().is_empty());
This is a nightly-only experimental API. (slice_flatten)
Takes a &mut [[T; N]], and flattens it to a &mut [T].
This panics if the length of the resulting slice would overflow a usize.
This is only possible when flattening a slice of arrays of zero-sized
+types, and thus tends to be irrelevant in practice. If
+size_of::<T>() > 0
, this will never panic.
#![feature(slice_flatten)]
+
+fn add_5_to_all(slice: &mut [i32]) {
+ for i in slice {
+ *i += 5;
+ }
+}
+
+let mut array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]];
+add_5_to_all(array.flatten_mut());
+assert_eq!(array, [[6, 7, 8], [9, 10, 11], [12, 13, 14]]);
Sorts the slice.
+This sort is stable (i.e., does not reorder equal elements) and O(n * log(n)) worst-case.
+When applicable, unstable sorting is preferred because it is generally faster than stable
+sorting and it doesn’t allocate auxiliary memory.
+See sort_unstable
.
The current algorithm is an adaptive, iterative merge sort inspired by +timsort. +It is designed to be very fast in cases where the slice is nearly sorted, or consists of +two or more sorted sequences concatenated one after another.
+Also, it allocates temporary storage half the size of self
, but for short slices a
+non-allocating insertion sort is used instead.
let mut v = [-5, 4, 1, -3, 2];
+
+v.sort();
+assert!(v == [-5, -3, 1, 2, 4]);
Sorts the slice with a comparator function.
+This sort is stable (i.e., does not reorder equal elements) and O(n * log(n)) worst-case.
+The comparator function must define a total ordering for the elements in the slice. If
+the ordering is not total, the order of the elements is unspecified. An order is a
+total order if it is (for all a
, b
and c
):
a < b
, a == b
or a > b
is true, anda < b
and b < c
implies a < c
. The same must hold for both ==
and >
.For example, while f64
doesn’t implement Ord
because NaN != NaN
, we can use
+partial_cmp
as our sort function when we know the slice doesn’t contain a NaN
.
let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
+assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
When applicable, unstable sorting is preferred because it is generally faster than stable
+sorting and it doesn’t allocate auxiliary memory.
+See sort_unstable_by
.
The current algorithm is an adaptive, iterative merge sort inspired by +timsort. +It is designed to be very fast in cases where the slice is nearly sorted, or consists of +two or more sorted sequences concatenated one after another.
+Also, it allocates temporary storage half the size of self
, but for short slices a
+non-allocating insertion sort is used instead.
let mut v = [5, 4, 1, 3, 2];
+v.sort_by(|a, b| a.cmp(b));
+assert!(v == [1, 2, 3, 4, 5]);
+
+// reverse sorting
+v.sort_by(|a, b| b.cmp(a));
+assert!(v == [5, 4, 3, 2, 1]);
Sorts the slice with a key extraction function.
+This sort is stable (i.e., does not reorder equal elements) and O(m * n * log(n)) +worst-case, where the key function is O(m).
+For expensive key functions (e.g. functions that are not simple property accesses or
+basic operations), sort_by_cached_key
is likely to be
+significantly faster, as it does not recompute element keys.
When applicable, unstable sorting is preferred because it is generally faster than stable
+sorting and it doesn’t allocate auxiliary memory.
+See sort_unstable_by_key
.
The current algorithm is an adaptive, iterative merge sort inspired by +timsort. +It is designed to be very fast in cases where the slice is nearly sorted, or consists of +two or more sorted sequences concatenated one after another.
+Also, it allocates temporary storage half the size of self
, but for short slices a
+non-allocating insertion sort is used instead.
let mut v = [-5i32, 4, 1, -3, 2];
+
+v.sort_by_key(|k| k.abs());
+assert!(v == [1, 2, -3, 4, -5]);
Sorts the slice with a key extraction function.
+During sorting, the key function is called at most once per element, by using +temporary storage to remember the results of key evaluation. +The order of calls to the key function is unspecified and may change in future versions +of the standard library.
+This sort is stable (i.e., does not reorder equal elements) and O(m * n + n * log(n)) +worst-case, where the key function is O(m).
+For simple key functions (e.g., functions that are property accesses or
+basic operations), sort_by_key
is likely to be
+faster.
The current algorithm is based on pattern-defeating quicksort by Orson Peters, +which combines the fast average case of randomized quicksort with the fast worst case of +heapsort, while achieving linear time on slices with certain patterns. It uses some +randomization to avoid degenerate cases, but with a fixed seed to always provide +deterministic behavior.
+In the worst case, the algorithm allocates temporary storage in a Vec<(K, usize)>
the
+length of the slice.
let mut v = [-5i32, 4, 32, -3, 2];
+
+v.sort_by_cached_key(|k| k.to_string());
+assert!(v == [-3, -5, 2, 32, 4]);
Copies self
into a new Vec
.
let s = [10, 40, 30];
+let x = s.to_vec();
+// Here, `s` and `x` can be modified independently.
This is a nightly-only experimental API. (allocator_api)
Copies self
into a new Vec
with an allocator.
#![feature(allocator_api)]
+
+use std::alloc::System;
+
+let s = [10, 40, 30];
+let x = s.to_vec_in(System);
+// Here, `s` and `x` can be modified independently.
Flattens a slice of T
into a single value Self::Output
.
assert_eq!(["hello", "world"].concat(), "helloworld");
+assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
Flattens a slice of T
into a single value Self::Output
, placing a
+given separator between each.
assert_eq!(["hello", "world"].join(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
Flattens a slice of T
into a single value Self::Output
, placing a
+given separator between each.
assert_eq!(["hello", "world"].connect(" "), "hello world");
+assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
Returns a vector containing a copy of this slice where each byte +is mapped to its ASCII upper case equivalent.
+ASCII letters ‘a’ to ‘z’ are mapped to ‘A’ to ‘Z’, +but non-ASCII letters are unchanged.
+To uppercase the value in-place, use make_ascii_uppercase
.
Returns a vector containing a copy of this slice where each byte +is mapped to its ASCII lower case equivalent.
+ASCII letters ‘A’ to ‘Z’ are mapped to ‘a’ to ‘z’, +but non-ASCII letters are unchanged.
+To lowercase the value in-place, use make_ascii_lowercase
.
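For example:

assert_eq!(b"Hello, World!".to_ascii_uppercase(), b"HELLO, WORLD!");
assert_eq!(b"Hello, World!".to_ascii_lowercase(), b"hello, world!");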
+operator. Read morepub trait Buf {
fn remaining(&self) -> usize;
+    fn chunk(&self) -> &[u8];
+ fn advance(&mut self, cnt: usize);
+
+ fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { ... }
+ fn has_remaining(&self) -> bool { ... }
+ fn copy_to_slice(&mut self, dst: &mut [u8]) { ... }
+ fn get_u8(&mut self) -> u8 { ... }
+ fn get_i8(&mut self) -> i8 { ... }
+ fn get_u16(&mut self) -> u16 { ... }
+ fn get_u16_le(&mut self) -> u16 { ... }
+ fn get_u16_ne(&mut self) -> u16 { ... }
+ fn get_i16(&mut self) -> i16 { ... }
+ fn get_i16_le(&mut self) -> i16 { ... }
+ fn get_i16_ne(&mut self) -> i16 { ... }
+ fn get_u32(&mut self) -> u32 { ... }
+ fn get_u32_le(&mut self) -> u32 { ... }
+ fn get_u32_ne(&mut self) -> u32 { ... }
+ fn get_i32(&mut self) -> i32 { ... }
+ fn get_i32_le(&mut self) -> i32 { ... }
+ fn get_i32_ne(&mut self) -> i32 { ... }
+ fn get_u64(&mut self) -> u64 { ... }
+ fn get_u64_le(&mut self) -> u64 { ... }
+ fn get_u64_ne(&mut self) -> u64 { ... }
+ fn get_i64(&mut self) -> i64 { ... }
+ fn get_i64_le(&mut self) -> i64 { ... }
+ fn get_i64_ne(&mut self) -> i64 { ... }
+ fn get_u128(&mut self) -> u128 { ... }
+ fn get_u128_le(&mut self) -> u128 { ... }
+ fn get_u128_ne(&mut self) -> u128 { ... }
+ fn get_i128(&mut self) -> i128 { ... }
+ fn get_i128_le(&mut self) -> i128 { ... }
+ fn get_i128_ne(&mut self) -> i128 { ... }
+ fn get_uint(&mut self, nbytes: usize) -> u64 { ... }
+ fn get_uint_le(&mut self, nbytes: usize) -> u64 { ... }
+ fn get_uint_ne(&mut self, nbytes: usize) -> u64 { ... }
+ fn get_int(&mut self, nbytes: usize) -> i64 { ... }
+ fn get_int_le(&mut self, nbytes: usize) -> i64 { ... }
+ fn get_int_ne(&mut self, nbytes: usize) -> i64 { ... }
+ fn get_f32(&mut self) -> f32 { ... }
+ fn get_f32_le(&mut self) -> f32 { ... }
+ fn get_f32_ne(&mut self) -> f32 { ... }
+ fn get_f64(&mut self) -> f64 { ... }
+ fn get_f64_le(&mut self) -> f64 { ... }
+ fn get_f64_ne(&mut self) -> f64 { ... }
+ fn copy_to_bytes(&mut self, len: usize) -> Bytes { ... }
+ fn take(self, limit: usize) -> Take<Self>
where
Self: Sized,
+ { ... }
+ fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where
Self: Sized,
+ { ... }
+    fn reader(self) -> Reader<Self>
where
Self: Sized,
+ { ... }
+}
Read bytes from a buffer.
+A buffer stores bytes in memory such that read operations are infallible.
+The underlying storage may or may not be in contiguous memory. A Buf
value
+is a cursor into the buffer. Reading from Buf
advances the cursor
+position. It can be thought of as an efficient Iterator
for collections of
+bytes.
The simplest Buf
is a &[u8]
.
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(b'h', buf.get_u8());
+assert_eq!(b'e', buf.get_u8());
+assert_eq!(b'l', buf.get_u8());
+
+let mut rest = [0; 8];
+buf.copy_to_slice(&mut rest);
+
+assert_eq!(&rest[..], &b"lo world"[..]);
Returns the number of bytes between the current position and the end of +the buffer.
+This value is greater than or equal to the length of the slice returned
+by chunk()
.
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(buf.remaining(), 11);
+
+buf.get_u8();
+
+assert_eq!(buf.remaining(), 10);
Implementations of remaining
should ensure that the return value does
+not change unless a call is made to advance
or any other function that
+is documented to change the Buf
’s current position.
Returns a slice starting at the current position and of length between 0
and Buf::remaining(). Note that this can return a shorter slice than the
remaining buffer (this allows a non-contiguous internal representation).
This is a lower level function. Most operations are done with other +functions.
+use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(buf.chunk(), &b"hello world"[..]);
+
+buf.advance(6);
+
+assert_eq!(buf.chunk(), &b"world"[..]);
This function should never panic. Once the end of the buffer is reached,
+i.e., Buf::remaining
returns 0, calls to chunk()
should return an
+empty slice.
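Both behaviors can be observed with the provided implementations; a short sketch:

use bytes::Buf;

// A fully consumed &[u8] yields an empty chunk...
let mut buf = &b"ab"[..];
buf.advance(2);
assert_eq!(buf.remaining(), 0);
assert!(buf.chunk().is_empty());

// ...and a non-contiguous Chain yields only its first segment.
let chain = (&b"hello "[..]).chain(&b"world"[..]);
assert_eq!(chain.chunk(), b"hello ");
assert_eq!(chain.remaining(), 11);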
Advance the internal cursor of the Buf
+The next call to chunk()
will return a slice starting cnt
bytes
+further into the underlying buffer.
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+
+assert_eq!(buf.chunk(), &b"hello world"[..]);
+
+buf.advance(6);
+
+assert_eq!(buf.chunk(), &b"world"[..]);
This function may panic if cnt > self.remaining()
.
It is recommended for implementations of advance
to panic if cnt > self.remaining()
. If the implementation does not panic, the call must
+behave as if cnt == self.remaining()
.
A call with cnt == 0
should never panic and be a no-op.
std
only.Fills dst
with potentially multiple slices starting at self
’s
+current position.
If the Buf
is backed by disjoint slices of bytes, chunk_vectored
enables
+fetching more than one slice at once. dst
is a slice of IoSlice
+references, enabling the slice to be directly used with writev
+without any further conversion. The sum of the lengths of all the
+buffers in dst
will be less than or equal to Buf::remaining()
.
The entries in dst
will be overwritten, but the data contained by
+the slices will not be modified. If chunk_vectored
does not fill every
+entry in dst
, then dst
is guaranteed to contain all remaining slices
+in `self.
This is a lower level function. Most operations are done with other +functions.
+This function should never panic. Once the end of the buffer is reached,
+i.e., Buf::remaining
returns 0, calls to chunk_vectored
must return 0
+without mutating dst
.
Implementations should also take care to properly handle being called
+with dst
being a zero length slice.
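A minimal sketch of the contiguous case, where a plain &[u8] fills at most one
entry:

use bytes::Buf;
use std::io::IoSlice;

let buf = &b"hello world"[..];
let mut dst = [IoSlice::new(&[]); 4];

let filled = buf.chunks_vectored(&mut dst);
assert_eq!(filled, 1);
assert_eq!(&dst[0][..], b"hello world");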
Returns true if there are any more bytes to consume
+This is equivalent to self.remaining() != 0
.
use bytes::Buf;
+
+let mut buf = &b"a"[..];
+
+assert!(buf.has_remaining());
+
+buf.get_u8();
+
+assert!(!buf.has_remaining());
Copies bytes from self
into dst
.
The cursor is advanced by the number of bytes copied. self
must have
+enough remaining bytes to fill dst
.
use bytes::Buf;
+
+let mut buf = &b"hello world"[..];
+let mut dst = [0; 5];
+
+buf.copy_to_slice(&mut dst);
+assert_eq!(&b"hello"[..], &dst);
+assert_eq!(6, buf.remaining());
This function panics if self.remaining() < dst.len()
.
Gets an unsigned 16 bit integer from self
in native-endian byte order.
The current position is advanced by 2.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x08\x09 hello",
+ false => b"\x09\x08 hello",
+};
+assert_eq!(0x0809, buf.get_u16_ne());
This function panics if there is not enough remaining data in self
.
Gets a signed 16 bit integer from self
in native-endian byte order.
The current position is advanced by 2.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x08\x09 hello",
+ false => b"\x09\x08 hello",
+};
+assert_eq!(0x0809, buf.get_i16_ne());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 32 bit integer from self
in the big-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+assert_eq!(0x0809A0A1, buf.get_u32());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 32 bit integer from self
in the little-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+assert_eq!(0x0809A0A1, buf.get_u32_le());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 32 bit integer from self
in native-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x08\x09\xA0\xA1 hello",
+ false => b"\xA1\xA0\x09\x08 hello",
+};
+assert_eq!(0x0809A0A1, buf.get_u32_ne());
This function panics if there is not enough remaining data in self
.
Gets a signed 32 bit integer from self
in little-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+assert_eq!(0x0809A0A1, buf.get_i32_le());
This function panics if there is not enough remaining data in self
.
Gets a signed 32 bit integer from self
in native-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x08\x09\xA0\xA1 hello",
+ false => b"\xA1\xA0\x09\x08 hello",
+};
+assert_eq!(0x0809A0A1, buf.get_i32_ne());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 64 bit integer from self
in big-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_u64());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 64 bit integer from self
in little-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_u64_le());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 64 bit integer from self
in native-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+ false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x0102030405060708, buf.get_u64_ne());
This function panics if there is not enough remaining data in self
.
Gets a signed 64 bit integer from self
in big-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_i64());
This function panics if there is not enough remaining data in self
.
Gets a signed 64 bit integer from self
in little-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x0102030405060708, buf.get_i64_le());
This function panics if there is not enough remaining data in self
.
Gets a signed 64 bit integer from self
in native-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+ false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x0102030405060708, buf.get_i64_ne());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 128 bit integer from self
in big-endian byte order.
The current position is advanced by 16.
+use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_u128());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 128 bit integer from self
in little-endian byte order.
The current position is advanced by 16.
+use bytes::Buf;
+
+let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le());
This function panics if there is not enough remaining data in self
.
Gets an unsigned 128 bit integer from self
in native-endian byte order.
The current position is advanced by 16.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+ false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne());
This function panics if there is not enough remaining data in self
.
Gets a signed 128 bit integer from self
in big-endian byte order.
The current position is advanced by 16.
+use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_i128());
This function panics if there is not enough remaining data in self
.
Gets a signed 128 bit integer from self
in little-endian byte order.
The current position is advanced by 16.
+use bytes::Buf;
+
+let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le());
This function panics if there is not enough remaining data in self
.
Gets a signed 128 bit integer from self
in native-endian byte order.
The current position is advanced by 16.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+ false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+};
+assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne());
This function panics if there is not enough remaining data in self
.
Gets an unsigned n-byte integer from self
in big-endian byte order.
The current position is advanced by nbytes
.
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03 hello"[..];
+assert_eq!(0x010203, buf.get_uint(3));
This function panics if there is not enough remaining data in self
.
Gets an unsigned n-byte integer from self
in little-endian byte order.
The current position is advanced by nbytes
.
use bytes::Buf;
+
+let mut buf = &b"\x03\x02\x01 hello"[..];
+assert_eq!(0x010203, buf.get_uint_le(3));
This function panics if there is not enough remaining data in self
.
Gets an unsigned n-byte integer from self
in native-endian byte order.
The current position is advanced by nbytes
.
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x01\x02\x03 hello",
+ false => b"\x03\x02\x01 hello",
+};
+assert_eq!(0x010203, buf.get_uint_ne(3));
This function panics if there is not enough remaining data in self
, or
+if nbytes
is greater than 8.
Gets a signed n-byte integer from self
in big-endian byte order.
The current position is advanced by nbytes
.
use bytes::Buf;
+
+let mut buf = &b"\x01\x02\x03 hello"[..];
+assert_eq!(0x010203, buf.get_int(3));
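Negative values are sign-extended from the most significant bit of the n-byte
value; a quick sketch:

use bytes::Buf;

let mut buf = &b"\xFF\xFE\x03 hello"[..];
assert_eq!(-0x01FD, buf.get_int(3));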
This function panics if there is not enough remaining data in self
, or
+if nbytes
is greater than 8.
Gets a signed n-byte integer from self
in little-endian byte order.
The current position is advanced by nbytes
.
use bytes::Buf;
+
+let mut buf = &b"\x03\x02\x01 hello"[..];
+assert_eq!(0x010203, buf.get_int_le(3));
This function panics if there is not enough remaining data in self
, or
+if nbytes
is greater than 8.
Gets a signed n-byte integer from self
in native-endian byte order.
The current position is advanced by nbytes
.
use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x01\x02\x03 hello",
+ false => b"\x03\x02\x01 hello",
+};
+assert_eq!(0x010203, buf.get_int_ne(3));
This function panics if there is not enough remaining data in self
, or
+if nbytes
is greater than 8.
Gets an IEEE754 single-precision (4 bytes) floating point number from
+self
in big-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf = &b"\x3F\x99\x99\x9A hello"[..];
+assert_eq!(1.2f32, buf.get_f32());
This function panics if there is not enough remaining data in self
.
Gets an IEEE754 single-precision (4 bytes) floating point number from
+self
in little-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf = &b"\x9A\x99\x99\x3F hello"[..];
+assert_eq!(1.2f32, buf.get_f32_le());
This function panics if there is not enough remaining data in self
.
Gets an IEEE754 single-precision (4 bytes) floating point number from
+self
in native-endian byte order.
The current position is advanced by 4.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x3F\x99\x99\x9A hello",
+ false => b"\x9A\x99\x99\x3F hello",
+};
+assert_eq!(1.2f32, buf.get_f32_ne());
This function panics if there is not enough remaining data in self
.
Gets an IEEE754 double-precision (8 bytes) floating point number from
+self
in big-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..];
+assert_eq!(1.2f64, buf.get_f64());
This function panics if there is not enough remaining data in self
.
Gets an IEEE754 double-precision (8 bytes) floating point number from
+self
in little-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..];
+assert_eq!(1.2f64, buf.get_f64_le());
This function panics if there is not enough remaining data in self
.
Gets an IEEE754 double-precision (8 bytes) floating point number from
+self
in native-endian byte order.
The current position is advanced by 8.
+use bytes::Buf;
+
+let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello",
+ false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello",
+};
+assert_eq!(1.2f64, buf.get_f64_ne());
This function panics if there is not enough remaining data in self
.
Consumes len
bytes inside self and returns new instance of Bytes
+with this data.
This function may be optimized by the underlying type to avoid actual
+copies. For example, the Bytes
implementation will do a shallow copy
+(ref-count increment).
use bytes::Buf;
+
+let bytes = (&b"hello world"[..]).copy_to_bytes(5);
+assert_eq!(&bytes[..], &b"hello"[..]);
Creates an adaptor which will read at most limit
bytes from self
.
This function returns a new instance of Buf
which will read at most
+limit
bytes.
use bytes::{Buf, BufMut};
+
+let mut buf = b"hello world"[..].take(5);
+let mut dst = vec![];
+
+dst.put(&mut buf);
+assert_eq!(dst, b"hello");
+
+let mut buf = buf.into_inner();
+dst.clear();
+dst.put(&mut buf);
+assert_eq!(dst, b" world");
Creates an adaptor which will chain this buffer with another.
+The returned Buf
instance will first consume all bytes from self
.
+Afterwards the output is equivalent to the output of next.
use bytes::Buf;
+
+let mut chain = b"hello "[..].chain(&b"world"[..]);
+
+let full = chain.copy_to_bytes(11);
+assert_eq!(full.chunk(), b"hello world");
Available on crate feature std only.
Creates an adaptor which implements the Read trait for self.
This function returns a new value which implements Read
by adapting
+the Read
trait functions to the Buf
trait functions. Given that
+Buf
operations are infallible, none of the Read
functions will
+return with Err
.
use bytes::{Bytes, Buf};
+use std::io::Read;
+
+let buf = Bytes::from("hello world");
+
+let mut reader = buf.reader();
+let mut dst = [0; 1024];
+
+let num = reader.read(&mut dst).unwrap();
+
+assert_eq!(11, num);
+assert_eq!(&dst[..11], &b"hello world"[..]);
pub unsafe trait BufMut {
fn remaining_mut(&self) -> usize;
+ unsafe fn advance_mut(&mut self, cnt: usize);
+ fn chunk_mut(&mut self) -> &mut UninitSlice;
+
+ fn has_remaining_mut(&self) -> bool { ... }
+ fn put<T: Buf>(&mut self, src: T)
where
Self: Sized,
+ { ... }
+ fn put_slice(&mut self, src: &[u8]) { ... }
+ fn put_bytes(&mut self, val: u8, cnt: usize) { ... }
+ fn put_u8(&mut self, n: u8) { ... }
+ fn put_i8(&mut self, n: i8) { ... }
+ fn put_u16(&mut self, n: u16) { ... }
+ fn put_u16_le(&mut self, n: u16) { ... }
+ fn put_u16_ne(&mut self, n: u16) { ... }
+ fn put_i16(&mut self, n: i16) { ... }
+ fn put_i16_le(&mut self, n: i16) { ... }
+ fn put_i16_ne(&mut self, n: i16) { ... }
+ fn put_u32(&mut self, n: u32) { ... }
+ fn put_u32_le(&mut self, n: u32) { ... }
+ fn put_u32_ne(&mut self, n: u32) { ... }
+ fn put_i32(&mut self, n: i32) { ... }
+ fn put_i32_le(&mut self, n: i32) { ... }
+ fn put_i32_ne(&mut self, n: i32) { ... }
+ fn put_u64(&mut self, n: u64) { ... }
+ fn put_u64_le(&mut self, n: u64) { ... }
+ fn put_u64_ne(&mut self, n: u64) { ... }
+ fn put_i64(&mut self, n: i64) { ... }
+ fn put_i64_le(&mut self, n: i64) { ... }
+ fn put_i64_ne(&mut self, n: i64) { ... }
+ fn put_u128(&mut self, n: u128) { ... }
+ fn put_u128_le(&mut self, n: u128) { ... }
+ fn put_u128_ne(&mut self, n: u128) { ... }
+ fn put_i128(&mut self, n: i128) { ... }
+ fn put_i128_le(&mut self, n: i128) { ... }
+ fn put_i128_ne(&mut self, n: i128) { ... }
+ fn put_uint(&mut self, n: u64, nbytes: usize) { ... }
+ fn put_uint_le(&mut self, n: u64, nbytes: usize) { ... }
+ fn put_uint_ne(&mut self, n: u64, nbytes: usize) { ... }
+ fn put_int(&mut self, n: i64, nbytes: usize) { ... }
+ fn put_int_le(&mut self, n: i64, nbytes: usize) { ... }
+ fn put_int_ne(&mut self, n: i64, nbytes: usize) { ... }
+ fn put_f32(&mut self, n: f32) { ... }
+ fn put_f32_le(&mut self, n: f32) { ... }
+ fn put_f32_ne(&mut self, n: f32) { ... }
+ fn put_f64(&mut self, n: f64) { ... }
+ fn put_f64_le(&mut self, n: f64) { ... }
+ fn put_f64_ne(&mut self, n: f64) { ... }
+ fn limit(self, limit: usize) -> Limit<Self>
where
Self: Sized,
+ { ... }
+    fn writer(self) -> Writer<Self>
where
Self: Sized,
+ { ... }
+ fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
where
Self: Sized,
+ { ... }
+}
A trait for values that provide sequential write access to bytes.
+Write bytes to a buffer
+A buffer stores bytes in memory such that write operations are infallible.
+The underlying storage may or may not be in contiguous memory. A BufMut
+value is a cursor into the buffer. Writing to BufMut
advances the cursor
+position.
The simplest BufMut
is a Vec<u8>
.
use bytes::BufMut;
+
+let mut buf = vec![];
+
+buf.put(&b"hello world"[..]);
+
+assert_eq!(buf, b"hello world");
Returns the number of bytes that can be written from the current +position until the end of the buffer is reached.
+This value is greater than or equal to the length of the slice returned
+by chunk_mut()
.
Writing to a BufMut
may involve allocating more memory on the fly.
+Implementations may fail before reaching the number of bytes indicated
+by this method if they encounter an allocation failure.
use bytes::BufMut;
+
+let mut dst = [0; 10];
+let mut buf = &mut dst[..];
+
+let original_remaining = buf.remaining_mut();
+buf.put(&b"hello"[..]);
+
+assert_eq!(original_remaining - 5, buf.remaining_mut());
Implementations of remaining_mut
should ensure that the return value
+does not change unless a call is made to advance_mut
or any other
+function that is documented to change the BufMut
’s current position.
remaining_mut may return a value smaller than the actual available space.
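For the provided &mut [u8] implementation the writable space is exactly the slice
length; a quick sketch:

use bytes::BufMut;

let mut storage = [0u8; 8];
let buf = &mut storage[..];
assert_eq!(buf.remaining_mut(), 8);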
Advance the internal cursor of the BufMut
+The next call to chunk_mut
will return a slice starting cnt
bytes
+further into the underlying buffer.
The caller must ensure that the next cnt
bytes of chunk
are
+initialized.
use bytes::BufMut;
+
+let mut buf = Vec::with_capacity(16);
+
+// Write some data
+buf.chunk_mut()[0..2].copy_from_slice(b"he");
+unsafe { buf.advance_mut(2) };
+
+// write more bytes
+buf.chunk_mut()[0..3].copy_from_slice(b"llo");
+
+unsafe { buf.advance_mut(3); }
+
+assert_eq!(5, buf.len());
+assert_eq!(buf, b"hello");
This function may panic if cnt > self.remaining_mut()
.
It is recommended for implementations of advance_mut
to panic if
+cnt > self.remaining_mut()
. If the implementation does not panic,
+the call must behave as if cnt == self.remaining_mut()
.
A call with cnt == 0
should never panic and be a no-op.
Returns a mutable slice starting at the current BufMut position and of
+length between 0 and BufMut::remaining_mut()
. Note that this can be shorter than the
+whole remainder of the buffer (this allows non-continuous implementation).
This is a lower level function. Most operations are done with other +functions.
+The returned byte slice may represent uninitialized memory.
+use bytes::BufMut;
+
+let mut buf = Vec::with_capacity(16);
+
+unsafe {
+ // MaybeUninit::as_mut_ptr
+ buf.chunk_mut()[0..].as_mut_ptr().write(b'h');
+ buf.chunk_mut()[1..].as_mut_ptr().write(b'e');
+
+ buf.advance_mut(2);
+
+ buf.chunk_mut()[0..].as_mut_ptr().write(b'l');
+ buf.chunk_mut()[1..].as_mut_ptr().write(b'l');
+ buf.chunk_mut()[2..].as_mut_ptr().write(b'o');
+
+ buf.advance_mut(3);
+}
+
+assert_eq!(5, buf.len());
+assert_eq!(buf, b"hello");
This function should never panic. chunk_mut
should return an empty
+slice if and only if remaining_mut()
returns 0. In other words,
+chunk_mut()
returning an empty slice implies that remaining_mut()
will
+return 0 and remaining_mut()
returning 0 implies that chunk_mut()
will
+return an empty slice.
This function may trigger an out-of-memory abort if it tries to allocate +memory and fails to do so.
+Returns true if there is space in self
for more bytes.
This is equivalent to self.remaining_mut() != 0
.
use bytes::BufMut;
+
+let mut dst = [0; 5];
+let mut buf = &mut dst[..];
+
+assert!(buf.has_remaining_mut());
+
+buf.put(&b"hello"[..]);
+
+assert!(!buf.has_remaining_mut());
Transfer bytes into self
from src
and advance the cursor by the
+number of bytes written.
use bytes::BufMut;
+
+let mut buf = vec![];
+
+buf.put_u8(b'h');
+buf.put(&b"ello"[..]);
+buf.put(&b" world"[..]);
+
+assert_eq!(buf, b"hello world");
Panics if self
does not have enough capacity to contain src
.
Transfer bytes into self
from src
and advance the cursor by the
+number of bytes written.
self
must have enough remaining capacity to contain all of src
.
use bytes::BufMut;
+
+let mut dst = [0; 6];
+
+{
+ let mut buf = &mut dst[..];
+ buf.put_slice(b"hello");
+
+ assert_eq!(1, buf.remaining_mut());
+}
+
+assert_eq!(b"hello\0", &dst);
Put cnt
bytes val
into self
.
Logically equivalent to calling self.put_u8(val)
cnt
times, but may work faster.
self
must have at least cnt
remaining capacity.
use bytes::BufMut;
+
+let mut dst = [0; 6];
+
+{
+ let mut buf = &mut dst[..];
+ buf.put_bytes(b'a', 4);
+
+ assert_eq!(2, buf.remaining_mut());
+}
+
+assert_eq!(b"aaaa\0\0", &dst);
This function panics if there is not enough remaining capacity in
+self
.
Writes an unsigned 16 bit integer to self
in little-endian byte order.
The current position is advanced by 2.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u16_le(0x0809);
+assert_eq!(buf, b"\x09\x08");
This function panics if there is not enough remaining capacity in
+self
.
Writes an unsigned 16 bit integer to self
in native-endian byte order.
The current position is advanced by 2.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u16_ne(0x0809);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x08\x09");
+} else {
+ assert_eq!(buf, b"\x09\x08");
+}
This function panics if there is not enough remaining capacity in
+self
.
Writes a signed 16 bit integer to self
in little-endian byte order.
The current position is advanced by 2.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i16_le(0x0809);
+assert_eq!(buf, b"\x09\x08");
This function panics if there is not enough remaining capacity in
+self
.
Writes a signed 16 bit integer to self
in native-endian byte order.
The current position is advanced by 2.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i16_ne(0x0809);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x08\x09");
+} else {
+ assert_eq!(buf, b"\x09\x08");
+}
This function panics if there is not enough remaining capacity in
+self
.
Writes an unsigned 32 bit integer to self
in big-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u32(0x0809A0A1);
+assert_eq!(buf, b"\x08\x09\xA0\xA1");
This function panics if there is not enough remaining capacity in
+self
.
Writes an unsigned 32 bit integer to self
in little-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u32_le(0x0809A0A1);
+assert_eq!(buf, b"\xA1\xA0\x09\x08");
This function panics if there is not enough remaining capacity in
+self
.
Writes an unsigned 32 bit integer to self
in native-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u32_ne(0x0809A0A1);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x08\x09\xA0\xA1");
+} else {
+ assert_eq!(buf, b"\xA1\xA0\x09\x08");
+}
This function panics if there is not enough remaining capacity in
+self
.
Writes a signed 32 bit integer to self
in big-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i32(0x0809A0A1);
+assert_eq!(buf, b"\x08\x09\xA0\xA1");
This function panics if there is not enough remaining capacity in
+self
.
Writes a signed 32 bit integer to self
in little-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i32_le(0x0809A0A1);
+assert_eq!(buf, b"\xA1\xA0\x09\x08");
This function panics if there is not enough remaining capacity in
+self
.
Writes a signed 32 bit integer to self
in native-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i32_ne(0x0809A0A1);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x08\x09\xA0\xA1");
+} else {
+ assert_eq!(buf, b"\xA1\xA0\x09\x08");
+}
This function panics if there is not enough remaining capacity in
+self
.
Writes an unsigned 64 bit integer to self
in the big-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u64(0x0102030405060708);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
This function panics if there is not enough remaining capacity in
+self
.
Writes an unsigned 64 bit integer to self
in little-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u64_le(0x0102030405060708);
+assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
This function panics if there is not enough remaining capacity in `self`.
Writes an unsigned 64 bit integer to `self` in native-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u64_ne(0x0102030405060708);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+} else {
+ assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in `self`.
Writes a signed 64 bit integer to `self` in the big-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i64(0x0102030405060708);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
This function panics if there is not enough remaining capacity in `self`.
Writes a signed 64 bit integer to `self` in little-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i64_le(0x0102030405060708);
+assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
This function panics if there is not enough remaining capacity in `self`.
Writes a signed 64 bit integer to `self` in native-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i64_ne(0x0102030405060708);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+} else {
+ assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in `self`.
Writes an unsigned 128 bit integer to `self` in the big-endian byte order.
The current position is advanced by 16.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u128(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
This function panics if there is not enough remaining capacity in `self`.
Writes an unsigned 128 bit integer to `self` in little-endian byte order.
The current position is advanced by 16.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u128_le(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
This function panics if there is not enough remaining capacity in `self`.
Writes an unsigned 128 bit integer to `self` in native-endian byte order.
The current position is advanced by 16.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_u128_ne(0x01020304050607080910111213141516);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+} else {
+ assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in `self`.
Writes a signed 128 bit integer to `self` in the big-endian byte order.
The current position is advanced by 16.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i128(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
This function panics if there is not enough remaining capacity in `self`.
Writes a signed 128 bit integer to `self` in little-endian byte order.
The current position is advanced by 16.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i128_le(0x01020304050607080910111213141516);
+assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
This function panics if there is not enough remaining capacity in `self`.
Writes a signed 128 bit integer to `self` in native-endian byte order.
The current position is advanced by 16.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_i128_ne(0x01020304050607080910111213141516);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+} else {
+ assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in `self`.
Writes an unsigned n-byte integer to `self` in big-endian byte order.
The current position is advanced by `nbytes`.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint(0x010203, 3);
+assert_eq!(buf, b"\x01\x02\x03");
This function panics if there is not enough remaining capacity in `self` or if `nbytes` is greater than 8.
Writes an unsigned n-byte integer to `self` in the little-endian byte order.
The current position is advanced by `nbytes`.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint_le(0x010203, 3);
+assert_eq!(buf, b"\x03\x02\x01");
This function panics if there is not enough remaining capacity in `self` or if `nbytes` is greater than 8.
Writes an unsigned n-byte integer to `self` in the native-endian byte order.
The current position is advanced by `nbytes`.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_uint_ne(0x010203, 3);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03");
+} else {
+ assert_eq!(buf, b"\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in `self` or if `nbytes` is greater than 8.
Writes the low `nbytes` of a signed integer to `self` in big-endian byte order.
The current position is advanced by `nbytes`.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int(0x0504010203, 3);
+assert_eq!(buf, b"\x01\x02\x03");
This function panics if there is not enough remaining capacity in `self` or if `nbytes` is greater than 8.
Writes the low `nbytes` of a signed integer to `self` in little-endian byte order.
The current position is advanced by `nbytes`.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int_le(0x0504010203, 3);
+assert_eq!(buf, b"\x03\x02\x01");
This function panics if there is not enough remaining capacity in `self` or if `nbytes` is greater than 8.
Writes the low `nbytes` of a signed integer to `self` in native-endian byte order.
The current position is advanced by `nbytes`.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_int_ne(0x010203, 3);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x01\x02\x03");
+} else {
+ assert_eq!(buf, b"\x03\x02\x01");
+}
This function panics if there is not enough remaining capacity in `self` or if `nbytes` is greater than 8.
Writes an IEEE754 single-precision (4 bytes) floating point number to `self` in big-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32(1.2f32);
+assert_eq!(buf, b"\x3F\x99\x99\x9A");
This function panics if there is not enough remaining capacity in `self`.
Writes an IEEE754 single-precision (4 bytes) floating point number to `self` in little-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32_le(1.2f32);
+assert_eq!(buf, b"\x9A\x99\x99\x3F");
This function panics if there is not enough remaining capacity in `self`.
Writes an IEEE754 single-precision (4 bytes) floating point number to `self` in native-endian byte order.
The current position is advanced by 4.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f32_ne(1.2f32);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x3F\x99\x99\x9A");
+} else {
+ assert_eq!(buf, b"\x9A\x99\x99\x3F");
+}
This function panics if there is not enough remaining capacity in `self`.
Writes an IEEE754 double-precision (8 bytes) floating point number to `self` in big-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64(1.2f64);
+assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
This function panics if there is not enough remaining capacity in `self`.
Writes an IEEE754 double-precision (8 bytes) floating point number to `self` in little-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64_le(1.2f64);
+assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
This function panics if there is not enough remaining capacity in `self`.
Writes an IEEE754 double-precision (8 bytes) floating point number to `self` in native-endian byte order.
The current position is advanced by 8.
+use bytes::BufMut;
+
+let mut buf = vec![];
+buf.put_f64_ne(1.2f64);
+if cfg!(target_endian = "big") {
+ assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+} else {
+ assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+}
This function panics if there is not enough remaining capacity in `self`.
Creates an adaptor which can write at most `limit` bytes to `self`.
+use bytes::BufMut;
+
+let arr = &mut [0u8; 128][..];
+assert_eq!(arr.remaining_mut(), 128);
+
+let dst = arr.limit(10);
+assert_eq!(dst.remaining_mut(), 10);
Available on crate feature `std` only.
Creates an adaptor which implements the `Write` trait for `self`.
This function returns a new value which implements `Write` by adapting
the `Write` trait functions to the `BufMut` trait functions. Given that
`BufMut` operations are infallible, none of the `Write` functions will
return with `Err`.
+use bytes::BufMut;
+use std::io::Write;
+
+let mut buf = vec![].writer();
+
+let num = buf.write(&b"hello world"[..]).unwrap();
+assert_eq!(11, num);
+
+let buf = buf.into_inner();
+
+assert_eq!(*buf, b"hello world"[..]);
Creates an adapter which will chain this buffer with another.
The returned `BufMut` instance will first write to all bytes from `self`.
Afterwards, it will write to `next`.
+use bytes::BufMut;
+
+let mut a = [0u8; 5];
+let mut b = [0u8; 6];
+
+let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
+
+chain.put_slice(b"hello world");
+
+assert_eq!(&a[..], b"hello");
+assert_eq!(&b[..], b" world");
+#[cfg(feature = "std")]
+use crate::buf::{reader, Reader};
+use crate::buf::{take, Chain, Take};
+#[cfg(feature = "std")]
+use crate::{min_u64_usize, saturating_sub_usize_u64};
+use crate::{panic_advance, panic_does_not_fit};
+
+#[cfg(feature = "std")]
+use std::io::IoSlice;
+
+use alloc::boxed::Box;
+
+macro_rules! buf_get_impl {
+ ($this:ident, $typ:tt::$conv:tt) => {{
+ const SIZE: usize = core::mem::size_of::<$typ>();
+
+ if $this.remaining() < SIZE {
+ panic_advance(SIZE, $this.remaining());
+ }
+
+ // try to convert directly from the bytes
+ // this Option<ret> trick is to avoid keeping a borrow on self
+ // when advance() is called (mut borrow) and to call chunk() only once
+ let ret = $this
+ .chunk()
+ .get(..SIZE)
+ .map(|src| unsafe { $typ::$conv(*(src as *const _ as *const [_; SIZE])) });
+
+ if let Some(ret) = ret {
+ // if the direct conversion was possible, advance and return
+ $this.advance(SIZE);
+ return ret;
+ } else {
+ // if not we copy the bytes in a temp buffer then convert
+ let mut buf = [0; SIZE];
+ $this.copy_to_slice(&mut buf); // (do the advance)
+ return $typ::$conv(buf);
+ }
+ }};
+ (le => $this:ident, $typ:tt, $len_to_read:expr) => {{
+ const SIZE: usize = core::mem::size_of::<$typ>();
+
+ // The same trick as above does not improve the best case speed.
+ // It seems to be linked to the way the method is optimised by the compiler
+ let mut buf = [0; SIZE];
+
+ let subslice = match buf.get_mut(..$len_to_read) {
+ Some(subslice) => subslice,
+ None => panic_does_not_fit(SIZE, $len_to_read),
+ };
+
+ $this.copy_to_slice(subslice);
+ return $typ::from_le_bytes(buf);
+ }};
+ (be => $this:ident, $typ:tt, $len_to_read:expr) => {{
+ const SIZE: usize = core::mem::size_of::<$typ>();
+
+ let slice_at = match SIZE.checked_sub($len_to_read) {
+ Some(slice_at) => slice_at,
+ None => panic_does_not_fit(SIZE, $len_to_read),
+ };
+
+ let mut buf = [0; SIZE];
+ $this.copy_to_slice(&mut buf[slice_at..]);
+ return $typ::from_be_bytes(buf);
+ }};
+}
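+
+ // Illustrative hand-expansion (a sketch, not compiler output) of
+ // `buf_get_impl!(self, u16::from_be_bytes)` as used by `get_u16` below;
+ // the real macro uses the `Option` trick above rather than `try_into`:
+ //
+ //     const SIZE: usize = core::mem::size_of::<u16>(); // 2
+ //     if self.remaining() < SIZE {
+ //         panic_advance(SIZE, self.remaining());
+ //     }
+ //     if let Some(src) = self.chunk().get(..SIZE) {
+ //         // fast path: the value lies entirely within the current chunk
+ //         let ret = u16::from_be_bytes(src.try_into().unwrap());
+ //         self.advance(SIZE);
+ //         return ret;
+ //     }
+ //     // slow path: the bytes straddle a chunk boundary; copy them out first
+ //     let mut buf = [0; SIZE];
+ //     self.copy_to_slice(&mut buf); // this performs the advance
+ //     u16::from_be_bytes(buf)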
+
+/// Read bytes from a buffer.
+///
+/// A buffer stores bytes in memory such that read operations are infallible.
+/// The underlying storage may or may not be in contiguous memory. A `Buf` value
+/// is a cursor into the buffer. Reading from `Buf` advances the cursor
+/// position. It can be thought of as an efficient `Iterator` for collections of
+/// bytes.
+///
+/// The simplest `Buf` is a `&[u8]`.
+///
+/// ```
+/// use bytes::Buf;
+///
+/// let mut buf = &b"hello world"[..];
+///
+/// assert_eq!(b'h', buf.get_u8());
+/// assert_eq!(b'e', buf.get_u8());
+/// assert_eq!(b'l', buf.get_u8());
+///
+/// let mut rest = [0; 8];
+/// buf.copy_to_slice(&mut rest);
+///
+/// assert_eq!(&rest[..], &b"lo world"[..]);
+/// ```
+pub trait Buf {
+ /// Returns the number of bytes between the current position and the end of
+ /// the buffer.
+ ///
+ /// This value is greater than or equal to the length of the slice returned
+ /// by `chunk()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"hello world"[..];
+ ///
+ /// assert_eq!(buf.remaining(), 11);
+ ///
+ /// buf.get_u8();
+ ///
+ /// assert_eq!(buf.remaining(), 10);
+ /// ```
+ ///
+ /// # Implementer notes
+ ///
+ /// Implementations of `remaining` should ensure that the return value does
+ /// not change unless a call is made to `advance` or any other function that
+ /// is documented to change the `Buf`'s current position.
+ fn remaining(&self) -> usize;
+
+ /// Returns a slice starting at the current position and of length between 0
+ /// and `Buf::remaining()`. Note that this *can* return a shorter slice (this allows
+ /// a non-contiguous internal representation).
+ ///
+ /// This is a lower level function. Most operations are done with other
+ /// functions.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"hello world"[..];
+ ///
+ /// assert_eq!(buf.chunk(), &b"hello world"[..]);
+ ///
+ /// buf.advance(6);
+ ///
+ /// assert_eq!(buf.chunk(), &b"world"[..]);
+ /// ```
+ ///
+ /// # Implementer notes
+ ///
+ /// This function should never panic. Once the end of the buffer is reached,
+ /// i.e., `Buf::remaining` returns 0, calls to `chunk()` should return an
+ /// empty slice.
+ // The `chunk` method was previously called `bytes`. This alias makes the rename
+ // more easily discoverable.
+ #[cfg_attr(docsrs, doc(alias = "bytes"))]
+ fn chunk(&self) -> &[u8];
+
+ /// Fills `dst` with potentially multiple slices starting at `self`'s
+ /// current position.
+ ///
+ /// If the `Buf` is backed by disjoint slices of bytes, `chunks_vectored` enables
+ /// fetching more than one slice at once. `dst` is a slice of `IoSlice`
+ /// references, enabling the slice to be directly used with [`writev`]
+ /// without any further conversion. The sum of the lengths of all the
+ /// buffers in `dst` will be less than or equal to `Buf::remaining()`.
+ ///
+ /// The entries in `dst` will be overwritten, but the data **contained** by
+ /// the slices **will not** be modified. If `chunks_vectored` does not fill every
+ /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices
+ /// in `self`.
+ ///
+ /// This is a lower level function. Most operations are done with other
+ /// functions.
+ ///
+ /// # Implementer notes
+ ///
+ /// This function should never panic. Once the end of the buffer is reached,
+ /// i.e., `Buf::remaining` returns 0, calls to `chunks_vectored` must return 0
+ /// without mutating `dst`.
+ ///
+ /// Implementations should also take care to properly handle being called
+ /// with `dst` being a zero length slice.
+ ///
+ /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
+ if dst.is_empty() {
+ return 0;
+ }
+
+ if self.has_remaining() {
+ dst[0] = IoSlice::new(self.chunk());
+ 1
+ } else {
+ 0
+ }
+ }
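+
+ // Usage sketch for the default implementation above: a contiguous `&[u8]`
+ // buffer fills at most one `IoSlice` per call.
+ //
+ //     use bytes::Buf;
+ //     use std::io::IoSlice;
+ //
+ //     let buf = &b"hello world"[..];
+ //     let mut dst = [IoSlice::new(&[]); 4];
+ //     assert_eq!(1, buf.chunks_vectored(&mut dst));
+ //     assert_eq!(&*dst[0], b"hello world");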
+
+ /// Advance the internal cursor of the Buf
+ ///
+ /// The next call to `chunk()` will return a slice starting `cnt` bytes
+ /// further into the underlying buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"hello world"[..];
+ ///
+ /// assert_eq!(buf.chunk(), &b"hello world"[..]);
+ ///
+ /// buf.advance(6);
+ ///
+ /// assert_eq!(buf.chunk(), &b"world"[..]);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function **may** panic if `cnt > self.remaining()`.
+ ///
+ /// # Implementer notes
+ ///
+ /// It is recommended for implementations of `advance` to panic if `cnt >
+ /// self.remaining()`. If the implementation does not panic, the call must
+ /// behave as if `cnt == self.remaining()`.
+ ///
+ /// A call with `cnt == 0` should never panic and be a no-op.
+ fn advance(&mut self, cnt: usize);
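+
+ // A minimal sketch of an implementation honoring the notes above, for a
+ // hypothetical cursor type (`SliceCursor` is invented for illustration):
+ //
+ //     struct SliceCursor<'a> { data: &'a [u8], pos: usize }
+ //
+ //     impl Buf for SliceCursor<'_> {
+ //         fn remaining(&self) -> usize { self.data.len() - self.pos }
+ //         fn chunk(&self) -> &[u8] { &self.data[self.pos..] }
+ //         fn advance(&mut self, cnt: usize) {
+ //             // recommended behavior: panic when advancing past the end
+ //             assert!(cnt <= self.remaining());
+ //             self.pos += cnt; // `cnt == 0` is a harmless no-op
+ //         }
+ //     }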
+
+ /// Returns true if there are any more bytes to consume
+ ///
+ /// This is equivalent to `self.remaining() != 0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"a"[..];
+ ///
+ /// assert!(buf.has_remaining());
+ ///
+ /// buf.get_u8();
+ ///
+ /// assert!(!buf.has_remaining());
+ /// ```
+ fn has_remaining(&self) -> bool {
+ self.remaining() > 0
+ }
+
+ /// Copies bytes from `self` into `dst`.
+ ///
+ /// The cursor is advanced by the number of bytes copied. `self` must have
+ /// enough remaining bytes to fill `dst`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"hello world"[..];
+ /// let mut dst = [0; 5];
+ ///
+ /// buf.copy_to_slice(&mut dst);
+ /// assert_eq!(&b"hello"[..], &dst);
+ /// assert_eq!(6, buf.remaining());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `self.remaining() < dst.len()`.
+ fn copy_to_slice(&mut self, mut dst: &mut [u8]) {
+ if self.remaining() < dst.len() {
+ panic_advance(dst.len(), self.remaining());
+ }
+
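+ // Copy chunk by chunk: each pass drains `min(chunk.len(), dst.len())`
+ // bytes, so non-contiguous buffers are handled transparently.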
+ while !dst.is_empty() {
+ let src = self.chunk();
+ let cnt = usize::min(src.len(), dst.len());
+
+ dst[..cnt].copy_from_slice(&src[..cnt]);
+ dst = &mut dst[cnt..];
+
+ self.advance(cnt);
+ }
+ }
+
+ /// Gets an unsigned 8 bit integer from `self`.
+ ///
+ /// The current position is advanced by 1.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08 hello"[..];
+ /// assert_eq!(8, buf.get_u8());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is no more remaining data in `self`.
+ fn get_u8(&mut self) -> u8 {
+ if self.remaining() < 1 {
+ panic_advance(1, 0);
+ }
+ let ret = self.chunk()[0];
+ self.advance(1);
+ ret
+ }
+
+ /// Gets a signed 8 bit integer from `self`.
+ ///
+ /// The current position is advanced by 1.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08 hello"[..];
+ /// assert_eq!(8, buf.get_i8());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is no more remaining data in `self`.
+ fn get_i8(&mut self) -> i8 {
+ if self.remaining() < 1 {
+ panic_advance(1, 0);
+ }
+ let ret = self.chunk()[0] as i8;
+ self.advance(1);
+ ret
+ }
+
+ /// Gets an unsigned 16 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08\x09 hello"[..];
+ /// assert_eq!(0x0809, buf.get_u16());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u16(&mut self) -> u16 {
+ buf_get_impl!(self, u16::from_be_bytes);
+ }
+
+ /// Gets an unsigned 16 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x09\x08 hello"[..];
+ /// assert_eq!(0x0809, buf.get_u16_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u16_le(&mut self) -> u16 {
+ buf_get_impl!(self, u16::from_le_bytes);
+ }
+
+ /// Gets an unsigned 16 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09 hello",
+ /// false => b"\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809, buf.get_u16_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u16_ne(&mut self) -> u16 {
+ buf_get_impl!(self, u16::from_ne_bytes);
+ }
+
+ /// Gets a signed 16 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08\x09 hello"[..];
+ /// assert_eq!(0x0809, buf.get_i16());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i16(&mut self) -> i16 {
+ buf_get_impl!(self, i16::from_be_bytes);
+ }
+
+ /// Gets a signed 16 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x09\x08 hello"[..];
+ /// assert_eq!(0x0809, buf.get_i16_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i16_le(&mut self) -> i16 {
+ buf_get_impl!(self, i16::from_le_bytes);
+ }
+
+ /// Gets a signed 16 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09 hello",
+ /// false => b"\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809, buf.get_i16_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i16_ne(&mut self) -> i16 {
+ buf_get_impl!(self, i16::from_ne_bytes);
+ }
+
+    /// Gets an unsigned 32 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+ /// assert_eq!(0x0809A0A1, buf.get_u32());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u32(&mut self) -> u32 {
+ buf_get_impl!(self, u32::from_be_bytes);
+ }
+
+    /// Gets an unsigned 32 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+ /// assert_eq!(0x0809A0A1, buf.get_u32_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u32_le(&mut self) -> u32 {
+ buf_get_impl!(self, u32::from_le_bytes);
+ }
+
+ /// Gets an unsigned 32 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09\xA0\xA1 hello",
+ /// false => b"\xA1\xA0\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809A0A1, buf.get_u32_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u32_ne(&mut self) -> u32 {
+ buf_get_impl!(self, u32::from_ne_bytes);
+ }
+
+ /// Gets a signed 32 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+ /// assert_eq!(0x0809A0A1, buf.get_i32());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i32(&mut self) -> i32 {
+ buf_get_impl!(self, i32::from_be_bytes);
+ }
+
+ /// Gets a signed 32 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
+ /// assert_eq!(0x0809A0A1, buf.get_i32_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i32_le(&mut self) -> i32 {
+ buf_get_impl!(self, i32::from_le_bytes);
+ }
+
+ /// Gets a signed 32 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09\xA0\xA1 hello",
+ /// false => b"\xA1\xA0\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809A0A1, buf.get_i32_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i32_ne(&mut self) -> i32 {
+ buf_get_impl!(self, i32::from_ne_bytes);
+ }
+
+ /// Gets an unsigned 64 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+ /// assert_eq!(0x0102030405060708, buf.get_u64());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u64(&mut self) -> u64 {
+ buf_get_impl!(self, u64::from_be_bytes);
+ }
+
+ /// Gets an unsigned 64 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+ /// assert_eq!(0x0102030405060708, buf.get_u64_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u64_le(&mut self) -> u64 {
+ buf_get_impl!(self, u64::from_le_bytes);
+ }
+
+ /// Gets an unsigned 64 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+ /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x0102030405060708, buf.get_u64_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u64_ne(&mut self) -> u64 {
+ buf_get_impl!(self, u64::from_ne_bytes);
+ }
+
+ /// Gets a signed 64 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+ /// assert_eq!(0x0102030405060708, buf.get_i64());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i64(&mut self) -> i64 {
+ buf_get_impl!(self, i64::from_be_bytes);
+ }
+
+ /// Gets a signed 64 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+ /// assert_eq!(0x0102030405060708, buf.get_i64_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i64_le(&mut self) -> i64 {
+ buf_get_impl!(self, i64::from_le_bytes);
+ }
+
+ /// Gets a signed 64 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+ /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x0102030405060708, buf.get_i64_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i64_ne(&mut self) -> i64 {
+ buf_get_impl!(self, i64::from_ne_bytes);
+ }
+
+ /// Gets an unsigned 128 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u128(&mut self) -> u128 {
+ buf_get_impl!(self, u128::from_be_bytes);
+ }
+
+ /// Gets an unsigned 128 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u128_le(&mut self) -> u128 {
+ buf_get_impl!(self, u128::from_le_bytes);
+ }
+
+ /// Gets an unsigned 128 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+ /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u128_ne(&mut self) -> u128 {
+ buf_get_impl!(self, u128::from_ne_bytes);
+ }
+
+ /// Gets a signed 128 bit integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i128(&mut self) -> i128 {
+ buf_get_impl!(self, i128::from_be_bytes);
+ }
+
+ /// Gets a signed 128 bit integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i128_le(&mut self) -> i128 {
+ buf_get_impl!(self, i128::from_le_bytes);
+ }
+
+ /// Gets a signed 128 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+ /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i128_ne(&mut self) -> i128 {
+ buf_get_impl!(self, i128::from_ne_bytes);
+ }
+
+ /// Gets an unsigned n-byte integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x01\x02\x03 hello"[..];
+ /// assert_eq!(0x010203, buf.get_uint(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+ fn get_uint(&mut self, nbytes: usize) -> u64 {
+ buf_get_impl!(be => self, u64, nbytes);
+ }
+
+ /// Gets an unsigned n-byte integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x03\x02\x01 hello"[..];
+ /// assert_eq!(0x010203, buf.get_uint_le(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+    /// This function panics if there is not enough remaining data in `self`, or
+    /// if `nbytes` is greater than 8.
+ fn get_uint_le(&mut self, nbytes: usize) -> u64 {
+ buf_get_impl!(le => self, u64, nbytes);
+ }
+
+ /// Gets an unsigned n-byte integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03 hello",
+ /// false => b"\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x010203, buf.get_uint_ne(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`, or
+ /// if `nbytes` is greater than 8.
+ fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
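+        // Native-endian is whichever of big- or little-endian matches the
+        // target, selected at compile time via `cfg!`.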
+ if cfg!(target_endian = "big") {
+ self.get_uint(nbytes)
+ } else {
+ self.get_uint_le(nbytes)
+ }
+ }
+
+ /// Gets a signed n-byte integer from `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x01\x02\x03 hello"[..];
+ /// assert_eq!(0x010203, buf.get_int(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`, or
+ /// if `nbytes` is greater than 8.
+ fn get_int(&mut self, nbytes: usize) -> i64 {
+ buf_get_impl!(be => self, i64, nbytes);
+ }
+
+ /// Gets a signed n-byte integer from `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x03\x02\x01 hello"[..];
+ /// assert_eq!(0x010203, buf.get_int_le(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`, or
+ /// if `nbytes` is greater than 8.
+ fn get_int_le(&mut self, nbytes: usize) -> i64 {
+ buf_get_impl!(le => self, i64, nbytes);
+ }
+
+ /// Gets a signed n-byte integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03 hello",
+ /// false => b"\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x010203, buf.get_int_ne(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`, or
+ /// if `nbytes` is greater than 8.
+ fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+ if cfg!(target_endian = "big") {
+ self.get_int(nbytes)
+ } else {
+ self.get_int_le(nbytes)
+ }
+ }
+
+ /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+ /// `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x3F\x99\x99\x9A hello"[..];
+ /// assert_eq!(1.2f32, buf.get_f32());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f32(&mut self) -> f32 {
+ f32::from_bits(self.get_u32())
+ }
+
+ /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+ /// `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x9A\x99\x99\x3F hello"[..];
+ /// assert_eq!(1.2f32, buf.get_f32_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f32_le(&mut self) -> f32 {
+ f32::from_bits(self.get_u32_le())
+ }
+
+ /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x3F\x99\x99\x9A hello",
+ /// false => b"\x9A\x99\x99\x3F hello",
+ /// };
+ /// assert_eq!(1.2f32, buf.get_f32_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f32_ne(&mut self) -> f32 {
+ f32::from_bits(self.get_u32_ne())
+ }
+
+ /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+ /// `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..];
+ /// assert_eq!(1.2f64, buf.get_f64());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f64(&mut self) -> f64 {
+ f64::from_bits(self.get_u64())
+ }
+
+ /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+ /// `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..];
+ /// assert_eq!(1.2f64, buf.get_f64_le());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f64_le(&mut self) -> f64 {
+ f64::from_bits(self.get_u64_le())
+ }
+
+ /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello",
+ /// false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello",
+ /// };
+ /// assert_eq!(1.2f64, buf.get_f64_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f64_ne(&mut self) -> f64 {
+ f64::from_bits(self.get_u64_ne())
+ }
+
+    /// Consumes `len` bytes from `self` and returns a new instance of `Bytes`
+    /// containing this data.
+ ///
+    /// This function may be optimized by the underlying type to avoid actual
+    /// copies. For example, the `Bytes` implementation performs a shallow copy
+    /// (a reference-count increment).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let bytes = (&b"hello world"[..]).copy_to_bytes(5);
+ /// assert_eq!(&bytes[..], &b"hello"[..]);
+ /// ```
+ fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
+ use super::BufMut;
+
+ if self.remaining() < len {
+ panic_advance(len, self.remaining());
+ }
+
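+        // Default implementation: copy the bytes into a freshly allocated
+        // `BytesMut` and freeze it. Implementations such as `Bytes` can
+        // override this with a shallow, ref-counted copy (see above).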
+ let mut ret = crate::BytesMut::with_capacity(len);
+ ret.put(self.take(len));
+ ret.freeze()
+ }
+
+ /// Creates an adaptor which will read at most `limit` bytes from `self`.
+ ///
+ /// This function returns a new instance of `Buf` which will read at most
+ /// `limit` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{Buf, BufMut};
+ ///
+ /// let mut buf = b"hello world"[..].take(5);
+ /// let mut dst = vec![];
+ ///
+ /// dst.put(&mut buf);
+ /// assert_eq!(dst, b"hello");
+ ///
+ /// let mut buf = buf.into_inner();
+ /// dst.clear();
+ /// dst.put(&mut buf);
+ /// assert_eq!(dst, b" world");
+ /// ```
+ fn take(self, limit: usize) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ take::new(self, limit)
+ }
+
+ /// Creates an adaptor which will chain this buffer with another.
+ ///
+ /// The returned `Buf` instance will first consume all bytes from `self`.
+    /// Afterwards, the output is equivalent to the output of `next`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut chain = b"hello "[..].chain(&b"world"[..]);
+ ///
+ /// let full = chain.copy_to_bytes(11);
+ /// assert_eq!(full.chunk(), b"hello world");
+ /// ```
+ fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
+ where
+ Self: Sized,
+ {
+ Chain::new(self, next)
+ }
+
+ /// Creates an adaptor which implements the `Read` trait for `self`.
+ ///
+ /// This function returns a new value which implements `Read` by adapting
+ /// the `Read` trait functions to the `Buf` trait functions. Given that
+ /// `Buf` operations are infallible, none of the `Read` functions will
+ /// return with `Err`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{Bytes, Buf};
+ /// use std::io::Read;
+ ///
+ /// let buf = Bytes::from("hello world");
+ ///
+ /// let mut reader = buf.reader();
+ /// let mut dst = [0; 1024];
+ ///
+ /// let num = reader.read(&mut dst).unwrap();
+ ///
+ /// assert_eq!(11, num);
+ /// assert_eq!(&dst[..11], &b"hello world"[..]);
+ /// ```
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ fn reader(self) -> Reader<Self>
+ where
+ Self: Sized,
+ {
+ reader::new(self)
+ }
+}
+
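+// Forwards every `Buf` method through a double dereference so that
+// pointer-like wrappers (`&mut T`, `Box<T>`) implement `Buf` by delegating
+// to the inner `T: Buf` (see the impls below).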
+macro_rules! deref_forward_buf {
+ () => {
+ #[inline]
+ fn remaining(&self) -> usize {
+ (**self).remaining()
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ (**self).chunk()
+ }
+
+ #[cfg(feature = "std")]
+ #[inline]
+ fn chunks_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
+ (**self).chunks_vectored(dst)
+ }
+
+ #[inline]
+ fn advance(&mut self, cnt: usize) {
+ (**self).advance(cnt)
+ }
+
+ #[inline]
+ fn has_remaining(&self) -> bool {
+ (**self).has_remaining()
+ }
+
+ #[inline]
+ fn copy_to_slice(&mut self, dst: &mut [u8]) {
+ (**self).copy_to_slice(dst)
+ }
+
+ #[inline]
+ fn get_u8(&mut self) -> u8 {
+ (**self).get_u8()
+ }
+
+ #[inline]
+ fn get_i8(&mut self) -> i8 {
+ (**self).get_i8()
+ }
+
+ #[inline]
+ fn get_u16(&mut self) -> u16 {
+ (**self).get_u16()
+ }
+
+ #[inline]
+ fn get_u16_le(&mut self) -> u16 {
+ (**self).get_u16_le()
+ }
+
+ #[inline]
+ fn get_u16_ne(&mut self) -> u16 {
+ (**self).get_u16_ne()
+ }
+
+ #[inline]
+ fn get_i16(&mut self) -> i16 {
+ (**self).get_i16()
+ }
+
+ #[inline]
+ fn get_i16_le(&mut self) -> i16 {
+ (**self).get_i16_le()
+ }
+
+ #[inline]
+ fn get_i16_ne(&mut self) -> i16 {
+ (**self).get_i16_ne()
+ }
+
+ #[inline]
+ fn get_u32(&mut self) -> u32 {
+ (**self).get_u32()
+ }
+
+ #[inline]
+ fn get_u32_le(&mut self) -> u32 {
+ (**self).get_u32_le()
+ }
+
+ #[inline]
+ fn get_u32_ne(&mut self) -> u32 {
+ (**self).get_u32_ne()
+ }
+
+ #[inline]
+ fn get_i32(&mut self) -> i32 {
+ (**self).get_i32()
+ }
+
+ #[inline]
+ fn get_i32_le(&mut self) -> i32 {
+ (**self).get_i32_le()
+ }
+
+ #[inline]
+ fn get_i32_ne(&mut self) -> i32 {
+ (**self).get_i32_ne()
+ }
+
+ #[inline]
+ fn get_u64(&mut self) -> u64 {
+ (**self).get_u64()
+ }
+
+ #[inline]
+ fn get_u64_le(&mut self) -> u64 {
+ (**self).get_u64_le()
+ }
+
+ #[inline]
+ fn get_u64_ne(&mut self) -> u64 {
+ (**self).get_u64_ne()
+ }
+
+ #[inline]
+ fn get_i64(&mut self) -> i64 {
+ (**self).get_i64()
+ }
+
+ #[inline]
+ fn get_i64_le(&mut self) -> i64 {
+ (**self).get_i64_le()
+ }
+
+ #[inline]
+ fn get_i64_ne(&mut self) -> i64 {
+ (**self).get_i64_ne()
+ }
+
+ #[inline]
+ fn get_uint(&mut self, nbytes: usize) -> u64 {
+ (**self).get_uint(nbytes)
+ }
+
+ #[inline]
+ fn get_uint_le(&mut self, nbytes: usize) -> u64 {
+ (**self).get_uint_le(nbytes)
+ }
+
+ #[inline]
+ fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
+ (**self).get_uint_ne(nbytes)
+ }
+
+ #[inline]
+ fn get_int(&mut self, nbytes: usize) -> i64 {
+ (**self).get_int(nbytes)
+ }
+
+ #[inline]
+ fn get_int_le(&mut self, nbytes: usize) -> i64 {
+ (**self).get_int_le(nbytes)
+ }
+
+ #[inline]
+ fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+ (**self).get_int_ne(nbytes)
+ }
+
+ #[inline]
+ fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
+ (**self).copy_to_bytes(len)
+ }
+ };
+}
+
+impl<T: Buf + ?Sized> Buf for &mut T {
+ deref_forward_buf!();
+}
+
+impl<T: Buf + ?Sized> Buf for Box<T> {
+ deref_forward_buf!();
+}
+
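+// A byte slice is the simplest `Buf`: `remaining` is the slice length,
+// `chunk` is the slice itself, and `advance` shrinks the slice from the
+// front.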
+impl Buf for &[u8] {
+ #[inline]
+ fn remaining(&self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ self
+ }
+
+ #[inline]
+ fn advance(&mut self, cnt: usize) {
+ if self.len() < cnt {
+ panic_advance(cnt, self.len());
+ }
+
+ *self = &self[cnt..];
+ }
+
+ #[inline]
+ fn copy_to_slice(&mut self, dst: &mut [u8]) {
+ if self.len() < dst.len() {
+ panic_advance(dst.len(), self.len());
+ }
+
+ dst.copy_from_slice(&self[..dst.len()]);
+ self.advance(dst.len());
+ }
+}
+
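+// `std::io::Cursor` stores its position as a `u64` that may exceed the
+// length of the underlying slice, so the conversions below saturate or
+// clamp rather than assume `position() <= len`.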
+#[cfg(feature = "std")]
+impl<T: AsRef<[u8]>> Buf for std::io::Cursor<T> {
+ #[inline]
+ fn remaining(&self) -> usize {
+ saturating_sub_usize_u64(self.get_ref().as_ref().len(), self.position())
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ let slice = self.get_ref().as_ref();
+ let pos = min_u64_usize(self.position(), slice.len());
+ &slice[pos..]
+ }
+
+ #[inline]
+ fn advance(&mut self, cnt: usize) {
+ let len = self.get_ref().as_ref().len();
+ let pos = self.position();
+
+ // We intentionally allow `cnt == 0` here even if `pos > len`.
+ let max_cnt = saturating_sub_usize_u64(len, pos);
+ if cnt > max_cnt {
+ panic_advance(cnt, max_cnt);
+ }
+
+ // This will not overflow because either `cnt == 0` or the sum is not
+ // greater than `len`.
+ self.set_position(pos + cnt as u64);
+ }
+}
+
+// The existence of this function makes the compiler verify that the `Buf`
+// trait is object-safe.
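+// Because `Box<T: Buf + ?Sized>` implements `Buf`, a boxed trait object
+// such as `Box<dyn Buf>` can itself be used wherever a `Buf` value is
+// expected.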
+fn _assert_trait_object(_b: &dyn Buf) {}
+
+use crate::buf::{limit, Chain, Limit, UninitSlice};
+#[cfg(feature = "std")]
+use crate::buf::{writer, Writer};
+use crate::{panic_advance, panic_does_not_fit};
+
+use core::{mem, ptr, usize};
+
+use alloc::{boxed::Box, vec::Vec};
+
+/// A trait for values that provide sequential write access to bytes.
+///
+/// Write bytes to a buffer.
+///
+/// A buffer stores bytes in memory such that write operations are infallible.
+/// The underlying storage may or may not be in contiguous memory. A `BufMut`
+/// value is a cursor into the buffer. Writing to `BufMut` advances the cursor
+/// position.
+///
+/// The simplest `BufMut` is a `Vec<u8>`.
+///
+/// ```
+/// use bytes::BufMut;
+///
+/// let mut buf = vec![];
+///
+/// buf.put(&b"hello world"[..]);
+///
+/// assert_eq!(buf, b"hello world");
+/// ```
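+// The trait is `unsafe` to implement: default methods such as `put_slice`
+// and `put_bytes` write through raw pointers obtained from `chunk_mut`, so
+// they rely on implementations upholding the documented contracts.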
+pub unsafe trait BufMut {
+ /// Returns the number of bytes that can be written from the current
+ /// position until the end of the buffer is reached.
+ ///
+ /// This value is greater than or equal to the length of the slice returned
+ /// by `chunk_mut()`.
+ ///
+ /// Writing to a `BufMut` may involve allocating more memory on the fly.
+ /// Implementations may fail before reaching the number of bytes indicated
+ /// by this method if they encounter an allocation failure.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut dst = [0; 10];
+ /// let mut buf = &mut dst[..];
+ ///
+ /// let original_remaining = buf.remaining_mut();
+ /// buf.put(&b"hello"[..]);
+ ///
+ /// assert_eq!(original_remaining - 5, buf.remaining_mut());
+ /// ```
+ ///
+ /// # Implementer notes
+ ///
+ /// Implementations of `remaining_mut` should ensure that the return value
+ /// does not change unless a call is made to `advance_mut` or any other
+ /// function that is documented to change the `BufMut`'s current position.
+ ///
+ /// # Note
+ ///
+    /// `remaining_mut` may return a value smaller than the actual available space.
+ fn remaining_mut(&self) -> usize;
+
+    /// Advances the internal cursor of the `BufMut`.
+ ///
+ /// The next call to `chunk_mut` will return a slice starting `cnt` bytes
+ /// further into the underlying buffer.
+ ///
+ /// # Safety
+ ///
+    /// The caller must ensure that the next `cnt` bytes of `chunk_mut()` are
+    /// initialized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = Vec::with_capacity(16);
+ ///
+ /// // Write some data
+ /// buf.chunk_mut()[0..2].copy_from_slice(b"he");
+ /// unsafe { buf.advance_mut(2) };
+ ///
+ /// // write more bytes
+ /// buf.chunk_mut()[0..3].copy_from_slice(b"llo");
+ ///
+ /// unsafe { buf.advance_mut(3); }
+ ///
+ /// assert_eq!(5, buf.len());
+ /// assert_eq!(buf, b"hello");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function **may** panic if `cnt > self.remaining_mut()`.
+ ///
+ /// # Implementer notes
+ ///
+ /// It is recommended for implementations of `advance_mut` to panic if
+ /// `cnt > self.remaining_mut()`. If the implementation does not panic,
+ /// the call must behave as if `cnt == self.remaining_mut()`.
+ ///
+    /// A call with `cnt == 0` should never panic and should be a no-op.
+ unsafe fn advance_mut(&mut self, cnt: usize);
+
+ /// Returns true if there is space in `self` for more bytes.
+ ///
+ /// This is equivalent to `self.remaining_mut() != 0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut dst = [0; 5];
+ /// let mut buf = &mut dst[..];
+ ///
+ /// assert!(buf.has_remaining_mut());
+ ///
+ /// buf.put(&b"hello"[..]);
+ ///
+ /// assert!(!buf.has_remaining_mut());
+ /// ```
+ #[inline]
+ fn has_remaining_mut(&self) -> bool {
+ self.remaining_mut() > 0
+ }
+
+    /// Returns a mutable slice starting at the current `BufMut` position and of
+    /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be
+    /// shorter than the whole remainder of the buffer (this allows
+    /// non-contiguous implementations).
+ ///
+    /// This is a lower-level function. Most operations are done with other
+    /// functions.
+ ///
+ /// The returned byte slice may represent uninitialized memory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = Vec::with_capacity(16);
+ ///
+ /// unsafe {
+ /// // MaybeUninit::as_mut_ptr
+ /// buf.chunk_mut()[0..].as_mut_ptr().write(b'h');
+ /// buf.chunk_mut()[1..].as_mut_ptr().write(b'e');
+ ///
+ /// buf.advance_mut(2);
+ ///
+ /// buf.chunk_mut()[0..].as_mut_ptr().write(b'l');
+ /// buf.chunk_mut()[1..].as_mut_ptr().write(b'l');
+ /// buf.chunk_mut()[2..].as_mut_ptr().write(b'o');
+ ///
+ /// buf.advance_mut(3);
+ /// }
+ ///
+ /// assert_eq!(5, buf.len());
+ /// assert_eq!(buf, b"hello");
+ /// ```
+ ///
+ /// # Implementer notes
+ ///
+ /// This function should never panic. `chunk_mut` should return an empty
+ /// slice **if and only if** `remaining_mut()` returns 0. In other words,
+ /// `chunk_mut()` returning an empty slice implies that `remaining_mut()` will
+ /// return 0 and `remaining_mut()` returning 0 implies that `chunk_mut()` will
+ /// return an empty slice.
+ ///
+ /// This function may trigger an out-of-memory abort if it tries to allocate
+ /// memory and fails to do so.
+ // The `chunk_mut` method was previously called `bytes_mut`. This alias makes the
+ // rename more easily discoverable.
+ #[cfg_attr(docsrs, doc(alias = "bytes_mut"))]
+ fn chunk_mut(&mut self) -> &mut UninitSlice;
+
+ /// Transfer bytes into `self` from `src` and advance the cursor by the
+ /// number of bytes written.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ ///
+ /// buf.put_u8(b'h');
+ /// buf.put(&b"ello"[..]);
+ /// buf.put(&b" world"[..]);
+ ///
+ /// assert_eq!(buf, b"hello world");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `self` does not have enough capacity to contain `src`.
+ #[inline]
+ fn put<T: super::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
+ if self.remaining_mut() < src.remaining() {
+ panic_advance(src.remaining(), self.remaining_mut());
+ }
+
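+        // Both buffers may be non-contiguous: each iteration copies the
+        // largest prefix that fits in the current chunks of `src` and
+        // `self`, then advances both cursors.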
+ while src.has_remaining() {
+ let s = src.chunk();
+ let d = self.chunk_mut();
+ let cnt = usize::min(s.len(), d.len());
+
+ d[..cnt].copy_from_slice(&s[..cnt]);
+
+ // SAFETY: We just initialized `cnt` bytes in `self`.
+ unsafe { self.advance_mut(cnt) };
+ src.advance(cnt);
+ }
+ }
+
+ /// Transfer bytes into `self` from `src` and advance the cursor by the
+ /// number of bytes written.
+ ///
+ /// `self` must have enough remaining capacity to contain all of `src`.
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut dst = [0; 6];
+ ///
+ /// {
+ /// let mut buf = &mut dst[..];
+ /// buf.put_slice(b"hello");
+ ///
+ /// assert_eq!(1, buf.remaining_mut());
+ /// }
+ ///
+ /// assert_eq!(b"hello\0", &dst);
+ /// ```
+ #[inline]
+ fn put_slice(&mut self, mut src: &[u8]) {
+ if self.remaining_mut() < src.len() {
+ panic_advance(src.len(), self.remaining_mut());
+ }
+
+ while !src.is_empty() {
+ let dst = self.chunk_mut();
+ let cnt = usize::min(src.len(), dst.len());
+
+ dst[..cnt].copy_from_slice(&src[..cnt]);
+ src = &src[cnt..];
+
+ // SAFETY: We just initialized `cnt` bytes in `self`.
+ unsafe { self.advance_mut(cnt) };
+ }
+ }
+
+    /// Puts `cnt` copies of the byte `val` into `self`.
+ ///
+ /// Logically equivalent to calling `self.put_u8(val)` `cnt` times, but may work faster.
+ ///
+ /// `self` must have at least `cnt` remaining capacity.
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut dst = [0; 6];
+ ///
+ /// {
+ /// let mut buf = &mut dst[..];
+ /// buf.put_bytes(b'a', 4);
+ ///
+ /// assert_eq!(2, buf.remaining_mut());
+ /// }
+ ///
+ /// assert_eq!(b"aaaa\0\0", &dst);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_bytes(&mut self, val: u8, mut cnt: usize) {
+ if self.remaining_mut() < cnt {
+ panic_advance(cnt, self.remaining_mut());
+ }
+
+ while cnt > 0 {
+ let dst = self.chunk_mut();
+ let dst_len = usize::min(dst.len(), cnt);
+ // SAFETY: The pointer is valid for `dst_len <= dst.len()` bytes.
+ unsafe { core::ptr::write_bytes(dst.as_mut_ptr(), val, dst_len) };
+ // SAFETY: We just initialized `dst_len` bytes in `self`.
+ unsafe { self.advance_mut(dst_len) };
+ cnt -= dst_len;
+ }
+ }
+
+ /// Writes an unsigned 8 bit integer to `self`.
+ ///
+ /// The current position is advanced by 1.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u8(0x01);
+ /// assert_eq!(buf, b"\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u8(&mut self, n: u8) {
+ let src = [n];
+ self.put_slice(&src);
+ }
+
+ /// Writes a signed 8 bit integer to `self`.
+ ///
+ /// The current position is advanced by 1.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i8(0x01);
+ /// assert_eq!(buf, b"\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i8(&mut self, n: i8) {
+ let src = [n as u8];
+ self.put_slice(&src)
+ }
+
+ /// Writes an unsigned 16 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u16(0x0809);
+ /// assert_eq!(buf, b"\x08\x09");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u16(&mut self, n: u16) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes an unsigned 16 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u16_le(0x0809);
+ /// assert_eq!(buf, b"\x09\x08");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u16_le(&mut self, n: u16) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes an unsigned 16 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u16_ne(0x0809);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09");
+ /// } else {
+ /// assert_eq!(buf, b"\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u16_ne(&mut self, n: u16) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+ /// Writes a signed 16 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i16(0x0809);
+ /// assert_eq!(buf, b"\x08\x09");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i16(&mut self, n: i16) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes a signed 16 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i16_le(0x0809);
+ /// assert_eq!(buf, b"\x09\x08");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i16_le(&mut self, n: i16) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes a signed 16 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i16_ne(0x0809);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09");
+ /// } else {
+ /// assert_eq!(buf, b"\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i16_ne(&mut self, n: i16) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+ /// Writes an unsigned 32 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u32(0x0809A0A1);
+ /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u32(&mut self, n: u32) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes an unsigned 32 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u32_le(0x0809A0A1);
+ /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u32_le(&mut self, n: u32) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes an unsigned 32 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u32_ne(0x0809A0A1);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+ /// } else {
+ /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u32_ne(&mut self, n: u32) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+ /// Writes a signed 32 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i32(0x0809A0A1);
+ /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i32(&mut self, n: i32) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes a signed 32 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i32_le(0x0809A0A1);
+ /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i32_le(&mut self, n: i32) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes a signed 32 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i32_ne(0x0809A0A1);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+ /// } else {
+ /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i32_ne(&mut self, n: i32) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+    /// Writes an unsigned 64 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u64(0x0102030405060708);
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u64(&mut self, n: u64) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes an unsigned 64 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u64_le(0x0102030405060708);
+ /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u64_le(&mut self, n: u64) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes an unsigned 64 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u64_ne(0x0102030405060708);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+ /// } else {
+ /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u64_ne(&mut self, n: u64) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+    /// Writes a signed 64 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i64(0x0102030405060708);
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i64(&mut self, n: i64) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes a signed 64 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i64_le(0x0102030405060708);
+ /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i64_le(&mut self, n: i64) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes a signed 64 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i64_ne(0x0102030405060708);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+ /// } else {
+ /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i64_ne(&mut self, n: i64) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+    /// Writes an unsigned 128 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u128(0x01020304050607080910111213141516);
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u128(&mut self, n: u128) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes an unsigned 128 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u128_le(0x01020304050607080910111213141516);
+ /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u128_le(&mut self, n: u128) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes an unsigned 128 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u128_ne(0x01020304050607080910111213141516);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+ /// } else {
+ /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_u128_ne(&mut self, n: u128) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+    /// Writes a signed 128 bit integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i128(0x01020304050607080910111213141516);
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i128(&mut self, n: i128) {
+ self.put_slice(&n.to_be_bytes())
+ }
+
+ /// Writes a signed 128 bit integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i128_le(0x01020304050607080910111213141516);
+ /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i128_le(&mut self, n: i128) {
+ self.put_slice(&n.to_le_bytes())
+ }
+
+ /// Writes a signed 128 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i128_ne(0x01020304050607080910111213141516);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+ /// } else {
+ /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_i128_ne(&mut self, n: i128) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
+ /// Writes an unsigned n-byte integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_uint(0x010203, 3);
+ /// assert_eq!(buf, b"\x01\x02\x03");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self` or if `nbytes` is greater than 8.
+ #[inline]
+ fn put_uint(&mut self, n: u64, nbytes: usize) {
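+        // The low `nbytes` occupy the tail of the big-endian representation;
+        // `checked_sub` fails (and we panic) when `nbytes` exceeds the
+        // integer's byte width.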
+ let start = match mem::size_of_val(&n).checked_sub(nbytes) {
+ Some(start) => start,
+ None => panic_does_not_fit(nbytes, mem::size_of_val(&n)),
+ };
+
+ self.put_slice(&n.to_be_bytes()[start..]);
+ }
+
+    /// Writes an unsigned n-byte integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_uint_le(0x010203, 3);
+ /// assert_eq!(buf, b"\x03\x02\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self` or if `nbytes` is greater than 8.
+ #[inline]
+ fn put_uint_le(&mut self, n: u64, nbytes: usize) {
+ let slice = n.to_le_bytes();
+ let slice = match slice.get(..nbytes) {
+ Some(slice) => slice,
+ None => panic_does_not_fit(nbytes, slice.len()),
+ };
+
+ self.put_slice(slice);
+ }
+
+ /// Writes an unsigned n-byte integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_uint_ne(0x010203, 3);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03");
+ /// } else {
+ /// assert_eq!(buf, b"\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self` or if `nbytes` is greater than 8.
+ #[inline]
+ fn put_uint_ne(&mut self, n: u64, nbytes: usize) {
+ if cfg!(target_endian = "big") {
+ self.put_uint(n, nbytes)
+ } else {
+ self.put_uint_le(n, nbytes)
+ }
+ }
+
+ /// Writes the low `nbytes` of a signed integer to `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_int(0x0504010203, 3);
+ /// assert_eq!(buf, b"\x01\x02\x03");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self` or if `nbytes` is greater than 8.
+ #[inline]
+ fn put_int(&mut self, n: i64, nbytes: usize) {
+ let start = match mem::size_of_val(&n).checked_sub(nbytes) {
+ Some(start) => start,
+ None => panic_does_not_fit(nbytes, mem::size_of_val(&n)),
+ };
+
+ self.put_slice(&n.to_be_bytes()[start..]);
+ }
+
+ /// Writes the low `nbytes` of a signed integer to `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_int_le(0x0504010203, 3);
+ /// assert_eq!(buf, b"\x03\x02\x01");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self` or if `nbytes` is greater than 8.
+ #[inline]
+ fn put_int_le(&mut self, n: i64, nbytes: usize) {
+ let slice = n.to_le_bytes();
+ let slice = match slice.get(..nbytes) {
+ Some(slice) => slice,
+ None => panic_does_not_fit(nbytes, slice.len()),
+ };
+
+ self.put_slice(slice);
+ }
+
+ /// Writes the low `nbytes` of a signed integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_int_ne(0x010203, 3);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03");
+ /// } else {
+ /// assert_eq!(buf, b"\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self` or if `nbytes` is greater than 8.
+ #[inline]
+ fn put_int_ne(&mut self, n: i64, nbytes: usize) {
+ if cfg!(target_endian = "big") {
+ self.put_int(n, nbytes)
+ } else {
+ self.put_int_le(n, nbytes)
+ }
+ }
+
+ /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+ /// `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f32(1.2f32);
+ /// assert_eq!(buf, b"\x3F\x99\x99\x9A");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_f32(&mut self, n: f32) {
+ self.put_u32(n.to_bits());
+ }
+
+ /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+ /// `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f32_le(1.2f32);
+ /// assert_eq!(buf, b"\x9A\x99\x99\x3F");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_f32_le(&mut self, n: f32) {
+ self.put_u32_le(n.to_bits());
+ }
+
+ /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f32_ne(1.2f32);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x3F\x99\x99\x9A");
+ /// } else {
+ /// assert_eq!(buf, b"\x9A\x99\x99\x3F");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_f32_ne(&mut self, n: f32) {
+ self.put_u32_ne(n.to_bits());
+ }
+
+ /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+ /// `self` in big-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f64(1.2f64);
+ /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_f64(&mut self, n: f64) {
+ self.put_u64(n.to_bits());
+ }
+
+ /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+ /// `self` in little-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f64_le(1.2f64);
+ /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_f64_le(&mut self, n: f64) {
+ self.put_u64_le(n.to_bits());
+ }
+
+ /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f64_ne(1.2f64);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+ /// } else {
+ /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ #[inline]
+ fn put_f64_ne(&mut self, n: f64) {
+ self.put_u64_ne(n.to_bits());
+ }
+
+ /// Creates an adaptor which can write at most `limit` bytes to `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let arr = &mut [0u8; 128][..];
+ /// assert_eq!(arr.remaining_mut(), 128);
+ ///
+ /// let dst = arr.limit(10);
+ /// assert_eq!(dst.remaining_mut(), 10);
+ /// ```
+ #[inline]
+ fn limit(self, limit: usize) -> Limit<Self>
+ where
+ Self: Sized,
+ {
+ limit::new(self, limit)
+ }
+
+ /// Creates an adaptor which implements the `Write` trait for `self`.
+ ///
+ /// This function returns a new value which implements `Write` by adapting
+ /// the `Write` trait functions to the `BufMut` trait functions. Given that
+ /// `BufMut` operations are infallible, none of the `Write` functions will
+ /// return with `Err`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ /// use std::io::Write;
+ ///
+ /// let mut buf = vec![].writer();
+ ///
+ /// let num = buf.write(&b"hello world"[..]).unwrap();
+ /// assert_eq!(11, num);
+ ///
+ /// let buf = buf.into_inner();
+ ///
+ /// assert_eq!(*buf, b"hello world"[..]);
+ /// ```
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ #[inline]
+ fn writer(self) -> Writer<Self>
+ where
+ Self: Sized,
+ {
+ writer::new(self)
+ }
+
+ /// Creates an adapter which will chain this buffer with another.
+ ///
+ /// The returned `BufMut` instance will first write to all bytes from
+ /// `self`. Afterwards, it will write to `next`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut a = [0u8; 5];
+ /// let mut b = [0u8; 6];
+ ///
+ /// let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
+ ///
+ /// chain.put_slice(b"hello world");
+ ///
+ /// assert_eq!(&a[..], b"hello");
+ /// assert_eq!(&b[..], b" world");
+ /// ```
+ #[inline]
+ fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
+ where
+ Self: Sized,
+ {
+ Chain::new(self, next)
+ }
+}
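+
+// Illustrative sketch (hypothetical test, not part of the upstream source):
+// the n-byte writers above keep only the low `nbytes` bytes of the value.
+#[cfg(test)]
+mod put_uint_sketch {
+    use super::BufMut;
+    use alloc::vec::Vec;
+
+    #[test]
+    fn keeps_only_the_low_bytes() {
+        let mut buf: Vec<u8> = Vec::new();
+        buf.put_uint(0x0102_0304, 3); // the high byte 0x01 is dropped
+        assert_eq!(buf, b"\x02\x03\x04");
+
+        buf.clear();
+        buf.put_uint_le(0x0102_0304, 3);
+        assert_eq!(buf, b"\x04\x03\x02");
+    }
+}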
+
+macro_rules! deref_forward_bufmut {
+ () => {
+ #[inline]
+ fn remaining_mut(&self) -> usize {
+ (**self).remaining_mut()
+ }
+
+ #[inline]
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ (**self).chunk_mut()
+ }
+
+ #[inline]
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ (**self).advance_mut(cnt)
+ }
+
+ #[inline]
+ fn put_slice(&mut self, src: &[u8]) {
+ (**self).put_slice(src)
+ }
+
+ #[inline]
+ fn put_u8(&mut self, n: u8) {
+ (**self).put_u8(n)
+ }
+
+ #[inline]
+ fn put_i8(&mut self, n: i8) {
+ (**self).put_i8(n)
+ }
+
+ #[inline]
+ fn put_u16(&mut self, n: u16) {
+ (**self).put_u16(n)
+ }
+
+ #[inline]
+ fn put_u16_le(&mut self, n: u16) {
+ (**self).put_u16_le(n)
+ }
+
+ #[inline]
+ fn put_u16_ne(&mut self, n: u16) {
+ (**self).put_u16_ne(n)
+ }
+
+ #[inline]
+ fn put_i16(&mut self, n: i16) {
+ (**self).put_i16(n)
+ }
+
+ #[inline]
+ fn put_i16_le(&mut self, n: i16) {
+ (**self).put_i16_le(n)
+ }
+
+ #[inline]
+ fn put_i16_ne(&mut self, n: i16) {
+ (**self).put_i16_ne(n)
+ }
+
+ #[inline]
+ fn put_u32(&mut self, n: u32) {
+ (**self).put_u32(n)
+ }
+
+ #[inline]
+ fn put_u32_le(&mut self, n: u32) {
+ (**self).put_u32_le(n)
+ }
+
+ #[inline]
+ fn put_u32_ne(&mut self, n: u32) {
+ (**self).put_u32_ne(n)
+ }
+
+ #[inline]
+ fn put_i32(&mut self, n: i32) {
+ (**self).put_i32(n)
+ }
+
+ #[inline]
+ fn put_i32_le(&mut self, n: i32) {
+ (**self).put_i32_le(n)
+ }
+
+ #[inline]
+ fn put_i32_ne(&mut self, n: i32) {
+ (**self).put_i32_ne(n)
+ }
+
+ #[inline]
+ fn put_u64(&mut self, n: u64) {
+ (**self).put_u64(n)
+ }
+
+ #[inline]
+ fn put_u64_le(&mut self, n: u64) {
+ (**self).put_u64_le(n)
+ }
+
+ #[inline]
+ fn put_u64_ne(&mut self, n: u64) {
+ (**self).put_u64_ne(n)
+ }
+
+ #[inline]
+ fn put_i64(&mut self, n: i64) {
+ (**self).put_i64(n)
+ }
+
+ #[inline]
+ fn put_i64_le(&mut self, n: i64) {
+ (**self).put_i64_le(n)
+ }
+
+ #[inline]
+ fn put_i64_ne(&mut self, n: i64) {
+ (**self).put_i64_ne(n)
+ }
+ };
+}
+
+unsafe impl<T: BufMut + ?Sized> BufMut for &mut T {
+ deref_forward_bufmut!();
+}
+
+unsafe impl<T: BufMut + ?Sized> BufMut for Box<T> {
+ deref_forward_bufmut!();
+}
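+
+// Illustrative sketch (hypothetical): because of the forwarding impls above,
+// generic code that takes `B: BufMut` by value also accepts `&mut B`.
+#[cfg(test)]
+mod forward_sketch {
+    use super::BufMut;
+    use alloc::vec::Vec;
+
+    fn write_header<B: BufMut>(mut buf: B) {
+        buf.put_u16(0xCAFE); // big-endian by default
+    }
+
+    #[test]
+    fn works_through_a_mutable_reference() {
+        let mut out: Vec<u8> = Vec::new();
+        write_header(&mut out); // uses the `&mut T` forwarding impl
+        assert_eq!(out, b"\xCA\xFE");
+    }
+}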
+
+unsafe impl BufMut for &mut [u8] {
+ #[inline]
+ fn remaining_mut(&self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ UninitSlice::new(self)
+ }
+
+ #[inline]
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ if self.len() < cnt {
+ panic_advance(cnt, self.len());
+ }
+
+ // Lifetime dance taken from `impl Write for &mut [u8]`.
+ let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt);
+ *self = b;
+ }
+
+ #[inline]
+ fn put_slice(&mut self, src: &[u8]) {
+ if self.len() < src.len() {
+ panic_advance(src.len(), self.len());
+ }
+
+ self[..src.len()].copy_from_slice(src);
+ // SAFETY: We just initialized `src.len()` bytes.
+ unsafe { self.advance_mut(src.len()) };
+ }
+
+ #[inline]
+ fn put_bytes(&mut self, val: u8, cnt: usize) {
+ if self.len() < cnt {
+ panic_advance(cnt, self.len());
+ }
+
+ // SAFETY: We just checked that the pointer is valid for `cnt` bytes.
+ unsafe {
+ ptr::write_bytes(self.as_mut_ptr(), val, cnt);
+ self.advance_mut(cnt);
+ }
+ }
+}
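+
+// Illustrative sketch (hypothetical): the impl above consumes the front of
+// the slice as bytes are written, so `remaining_mut` shrinks toward zero.
+#[cfg(test)]
+mod slice_bufmut_sketch {
+    use super::BufMut;
+
+    #[test]
+    fn slice_shrinks_as_written() {
+        let mut data = [0u8; 4];
+        let mut dst = &mut data[..];
+        dst.put_u8(1);
+        assert_eq!(dst.remaining_mut(), 3);
+        dst.put_slice(&[2, 3, 4]);
+        assert_eq!(dst.remaining_mut(), 0);
+        assert_eq!(data, [1, 2, 3, 4]);
+    }
+}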
+
+unsafe impl BufMut for &mut [core::mem::MaybeUninit<u8>] {
+ #[inline]
+ fn remaining_mut(&self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ UninitSlice::uninit(self)
+ }
+
+ #[inline]
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ if self.len() < cnt {
+ panic_advance(cnt, self.len());
+ }
+
+ // Lifetime dance taken from `impl Write for &mut [u8]`.
+ let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt);
+ *self = b;
+ }
+
+ #[inline]
+ fn put_slice(&mut self, src: &[u8]) {
+ if self.len() < src.len() {
+ panic_advance(src.len(), self.len());
+ }
+
+ // SAFETY: We just checked that the pointer is valid for `src.len()` bytes.
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr().cast(), src.len());
+ self.advance_mut(src.len());
+ }
+ }
+
+ #[inline]
+ fn put_bytes(&mut self, val: u8, cnt: usize) {
+ if self.len() < cnt {
+ panic_advance(cnt, self.len());
+ }
+
+ // SAFETY: We just checked that the pointer is valid for `cnt` bytes.
+ unsafe {
+ ptr::write_bytes(self.as_mut_ptr() as *mut u8, val, cnt);
+ self.advance_mut(cnt);
+ }
+ }
+}
+
+unsafe impl BufMut for Vec<u8> {
+ #[inline]
+ fn remaining_mut(&self) -> usize {
+ // A vector can never have more than isize::MAX bytes
+ core::isize::MAX as usize - self.len()
+ }
+
+ #[inline]
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ let len = self.len();
+ let remaining = self.capacity() - len;
+
+ if remaining < cnt {
+ panic_advance(cnt, remaining);
+ }
+
+ // Addition will not overflow since the sum is at most the capacity.
+ self.set_len(len + cnt);
+ }
+
+ #[inline]
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ if self.capacity() == self.len() {
+ self.reserve(64); // Grow the vec
+ }
+
+ let cap = self.capacity();
+ let len = self.len();
+
+ let ptr = self.as_mut_ptr();
+ // SAFETY: Since `ptr` is valid for `cap` bytes, `ptr.add(len)` must be
+ // valid for `cap - len` bytes. The subtraction will not underflow since
+ // `len <= cap`.
+ unsafe { UninitSlice::from_raw_parts_mut(ptr.add(len), cap - len) }
+ }
+
+ // Specialize these methods so they can skip checking `remaining_mut`
+ // and `advance_mut`.
+ #[inline]
+ fn put<T: super::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
+ // In case the src isn't contiguous, reserve upfront.
+ self.reserve(src.remaining());
+
+ while src.has_remaining() {
+ let s = src.chunk();
+ let l = s.len();
+ self.extend_from_slice(s);
+ src.advance(l);
+ }
+ }
+
+ #[inline]
+ fn put_slice(&mut self, src: &[u8]) {
+ self.extend_from_slice(src);
+ }
+
+ #[inline]
+ fn put_bytes(&mut self, val: u8, cnt: usize) {
+ // If the addition overflows, then the `resize` will fail.
+ let new_len = self.len().saturating_add(cnt);
+ self.resize(new_len, val);
+ }
+}
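+
+// Illustrative sketch (hypothetical): unlike fixed-size targets, the
+// `Vec<u8>` impl above grows on demand, so writes effectively never run out
+// of capacity.
+#[cfg(test)]
+mod vec_bufmut_sketch {
+    use super::BufMut;
+    use alloc::vec::Vec;
+
+    #[test]
+    fn grows_on_demand() {
+        let mut buf: Vec<u8> = Vec::new(); // starts with no capacity
+        buf.put_slice(b"abc"); // forwarded to `extend_from_slice`
+        buf.put_bytes(0, 2);   // forwarded to `resize`
+        assert_eq!(buf, b"abc\x00\x00");
+    }
+}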
+
+// The existence of this function makes the compiler verify that the `BufMut`
+// trait is object-safe, i.e. that it can be used as a trait object.
+fn _assert_trait_object(_b: &dyn BufMut) {}
+
use crate::buf::{IntoIter, UninitSlice};
+use crate::{Buf, BufMut, Bytes};
+
+#[cfg(feature = "std")]
+use std::io::IoSlice;
+
+/// A `Chain` sequences two buffers.
+///
+/// `Chain` is an adapter that links two underlying buffers and provides a
+/// continuous view across both buffers. It is able to sequence either immutable
+/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
+///
+/// This struct is generally created by calling [`Buf::chain`]. Please see that
+/// function's documentation for more detail.
+///
+/// # Examples
+///
+/// ```
+/// use bytes::{Bytes, Buf};
+///
+/// let mut buf = (&b"hello "[..])
+/// .chain(&b"world"[..]);
+///
+/// let full: Bytes = buf.copy_to_bytes(11);
+/// assert_eq!(full[..], b"hello world"[..]);
+/// ```
+///
+/// [`Buf::chain`]: Buf::chain
+#[derive(Debug)]
+pub struct Chain<T, U> {
+ a: T,
+ b: U,
+}
+
+impl<T, U> Chain<T, U> {
+ /// Creates a new `Chain` sequencing the provided values.
+ pub(crate) fn new(a: T, b: U) -> Chain<T, U> {
+ Chain { a, b }
+ }
+
+ /// Gets a reference to the first underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let buf = (&b"hello"[..])
+ /// .chain(&b"world"[..]);
+ ///
+ /// assert_eq!(buf.first_ref()[..], b"hello"[..]);
+ /// ```
+ pub fn first_ref(&self) -> &T {
+ &self.a
+ }
+
+ /// Gets a mutable reference to the first underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = (&b"hello"[..])
+ /// .chain(&b"world"[..]);
+ ///
+ /// buf.first_mut().advance(1);
+ ///
+ /// let full = buf.copy_to_bytes(9);
+ /// assert_eq!(full, b"elloworld"[..]);
+ /// ```
+ pub fn first_mut(&mut self) -> &mut T {
+ &mut self.a
+ }
+
+ /// Gets a reference to the last underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let buf = (&b"hello"[..])
+ /// .chain(&b"world"[..]);
+ ///
+ /// assert_eq!(buf.last_ref()[..], b"world"[..]);
+ /// ```
+ pub fn last_ref(&self) -> &U {
+ &self.b
+ }
+
+ /// Gets a mutable reference to the last underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = (&b"hello "[..])
+ /// .chain(&b"world"[..]);
+ ///
+ /// buf.last_mut().advance(1);
+ ///
+ /// let full = buf.copy_to_bytes(10);
+ /// assert_eq!(full, b"hello orld"[..]);
+ /// ```
+ pub fn last_mut(&mut self) -> &mut U {
+ &mut self.b
+ }
+
+ /// Consumes this `Chain`, returning the underlying values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let chain = (&b"hello"[..])
+ /// .chain(&b"world"[..]);
+ ///
+ /// let (first, last) = chain.into_inner();
+ /// assert_eq!(first[..], b"hello"[..]);
+ /// assert_eq!(last[..], b"world"[..]);
+ /// ```
+ pub fn into_inner(self) -> (T, U) {
+ (self.a, self.b)
+ }
+}
+
+impl<T, U> Buf for Chain<T, U>
+where
+ T: Buf,
+ U: Buf,
+{
+ fn remaining(&self) -> usize {
+ self.a.remaining().saturating_add(self.b.remaining())
+ }
+
+ fn chunk(&self) -> &[u8] {
+ if self.a.has_remaining() {
+ self.a.chunk()
+ } else {
+ self.b.chunk()
+ }
+ }
+
+ fn advance(&mut self, mut cnt: usize) {
+ let a_rem = self.a.remaining();
+
+ if a_rem != 0 {
+ if a_rem >= cnt {
+ self.a.advance(cnt);
+ return;
+ }
+
+ // Consume what is left of a
+ self.a.advance(a_rem);
+
+ cnt -= a_rem;
+ }
+
+ self.b.advance(cnt);
+ }
+
+ #[cfg(feature = "std")]
+ fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
+ let mut n = self.a.chunks_vectored(dst);
+ n += self.b.chunks_vectored(&mut dst[n..]);
+ n
+ }
+
+ fn copy_to_bytes(&mut self, len: usize) -> Bytes {
+ let a_rem = self.a.remaining();
+ if a_rem >= len {
+ self.a.copy_to_bytes(len)
+ } else if a_rem == 0 {
+ self.b.copy_to_bytes(len)
+ } else {
+ assert!(
+ len - a_rem <= self.b.remaining(),
+ "`len` greater than remaining"
+ );
+ let mut ret = crate::BytesMut::with_capacity(len);
+ ret.put(&mut self.a);
+ ret.put((&mut self.b).take(len - a_rem));
+ ret.freeze()
+ }
+ }
+}
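+
+// Illustrative sketch (hypothetical): `copy_to_bytes` above only allocates a
+// fresh buffer when the request crosses the boundary between the two halves.
+#[cfg(test)]
+mod chain_copy_sketch {
+    use crate::Buf;
+
+    #[test]
+    fn spans_both_buffers() {
+        let mut chain = (&b"hel"[..]).chain(&b"lo"[..]);
+        let bytes = chain.copy_to_bytes(5); // crosses the a/b boundary
+        assert_eq!(&bytes[..], b"hello");
+        assert_eq!(chain.remaining(), 0);
+    }
+}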
+
+unsafe impl<T, U> BufMut for Chain<T, U>
+where
+ T: BufMut,
+ U: BufMut,
+{
+ fn remaining_mut(&self) -> usize {
+ self.a
+ .remaining_mut()
+ .saturating_add(self.b.remaining_mut())
+ }
+
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ if self.a.has_remaining_mut() {
+ self.a.chunk_mut()
+ } else {
+ self.b.chunk_mut()
+ }
+ }
+
+ unsafe fn advance_mut(&mut self, mut cnt: usize) {
+ let a_rem = self.a.remaining_mut();
+
+ if a_rem != 0 {
+ if a_rem >= cnt {
+ self.a.advance_mut(cnt);
+ return;
+ }
+
+ // Consume what is left of a
+ self.a.advance_mut(a_rem);
+
+ cnt -= a_rem;
+ }
+
+ self.b.advance_mut(cnt);
+ }
+}
+
+impl<T, U> IntoIterator for Chain<T, U>
+where
+ T: Buf,
+ U: Buf,
+{
+ type Item = u8;
+ type IntoIter = IntoIter<Chain<T, U>>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self)
+ }
+}
+
use crate::Buf;
+
+/// Iterator over the bytes contained by the buffer.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use bytes::Bytes;
+///
+/// let buf = Bytes::from(&b"abc"[..]);
+/// let mut iter = buf.into_iter();
+///
+/// assert_eq!(iter.next(), Some(b'a'));
+/// assert_eq!(iter.next(), Some(b'b'));
+/// assert_eq!(iter.next(), Some(b'c'));
+/// assert_eq!(iter.next(), None);
+/// ```
+#[derive(Debug)]
+pub struct IntoIter<T> {
+ inner: T,
+}
+
+impl<T> IntoIter<T> {
+ /// Creates an iterator over the bytes contained by the buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let buf = Bytes::from_static(b"abc");
+ /// let mut iter = buf.into_iter();
+ ///
+ /// assert_eq!(iter.next(), Some(b'a'));
+ /// assert_eq!(iter.next(), Some(b'b'));
+ /// assert_eq!(iter.next(), Some(b'c'));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ pub fn new(inner: T) -> IntoIter<T> {
+ IntoIter { inner }
+ }
+
+ /// Consumes this `IntoIter`, returning the underlying value.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::{Buf, Bytes};
+ ///
+ /// let buf = Bytes::from(&b"abc"[..]);
+ /// let mut iter = buf.into_iter();
+ ///
+ /// assert_eq!(iter.next(), Some(b'a'));
+ ///
+ /// let buf = iter.into_inner();
+ /// assert_eq!(2, buf.remaining());
+ /// ```
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+
+ /// Gets a reference to the underlying `Buf`.
+ ///
+ /// It is inadvisable to directly read from the underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::{Buf, Bytes};
+ ///
+ /// let buf = Bytes::from(&b"abc"[..]);
+ /// let mut iter = buf.into_iter();
+ ///
+ /// assert_eq!(iter.next(), Some(b'a'));
+ ///
+ /// assert_eq!(2, iter.get_ref().remaining());
+ /// ```
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying `Buf`.
+ ///
+ /// It is inadvisable to directly read from the underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::{Buf, BytesMut};
+ ///
+ /// let buf = BytesMut::from(&b"abc"[..]);
+ /// let mut iter = buf.into_iter();
+ ///
+ /// assert_eq!(iter.next(), Some(b'a'));
+ ///
+ /// iter.get_mut().advance(1);
+ ///
+ /// assert_eq!(iter.next(), Some(b'c'));
+ /// ```
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+}
+
+impl<T: Buf> Iterator for IntoIter<T> {
+ type Item = u8;
+
+ fn next(&mut self) -> Option<u8> {
+ if !self.inner.has_remaining() {
+ return None;
+ }
+
+ let b = self.inner.chunk()[0];
+ self.inner.advance(1);
+
+ Some(b)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let rem = self.inner.remaining();
+ (rem, Some(rem))
+ }
+}
+
+impl<T: Buf> ExactSizeIterator for IntoIter<T> {}
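+
+// Illustrative sketch (hypothetical): `size_hint` above is exact, which is
+// what justifies the `ExactSizeIterator` impl.
+#[cfg(test)]
+mod into_iter_sketch {
+    use super::IntoIter;
+
+    #[test]
+    fn len_matches_remaining() {
+        let iter = IntoIter::new(&b"abc"[..]);
+        assert_eq!(iter.len(), 3); // via ExactSizeIterator
+        assert_eq!(iter.count(), 3);
+    }
+}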
+
use crate::buf::UninitSlice;
+use crate::BufMut;
+
+use core::cmp;
+
+/// A `BufMut` adapter which limits the amount of bytes that can be written
+/// to an underlying buffer.
+#[derive(Debug)]
+pub struct Limit<T> {
+ inner: T,
+ limit: usize,
+}
+
+pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> {
+ Limit { inner, limit }
+}
+
+impl<T> Limit<T> {
+ /// Consumes this `Limit`, returning the underlying value.
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+
+ /// Gets a reference to the underlying `BufMut`.
+ ///
+ /// It is inadvisable to directly write to the underlying `BufMut`.
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying `BufMut`.
+ ///
+ /// It is inadvisable to directly write to the underlying `BufMut`.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ /// Returns the maximum number of bytes that can be written
+ ///
+ /// # Note
+ ///
+ /// If the inner `BufMut` has fewer bytes than indicated by this method then
+ /// that is the actual number of available bytes.
+ pub fn limit(&self) -> usize {
+ self.limit
+ }
+
+ /// Sets the maximum number of bytes that can be written.
+ ///
+ /// # Note
+ ///
+ /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual
+ /// number of available bytes.
+ pub fn set_limit(&mut self, lim: usize) {
+ self.limit = lim
+ }
+}
+
+unsafe impl<T: BufMut> BufMut for Limit<T> {
+ fn remaining_mut(&self) -> usize {
+ cmp::min(self.inner.remaining_mut(), self.limit)
+ }
+
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ let bytes = self.inner.chunk_mut();
+ let end = cmp::min(bytes.len(), self.limit);
+ &mut bytes[..end]
+ }
+
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ assert!(cnt <= self.limit);
+ self.inner.advance_mut(cnt);
+ self.limit -= cnt;
+ }
+}
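+
+// Illustrative sketch (hypothetical): the impl above reports the smaller of
+// the inner capacity and the remaining limit, and each write draws the limit
+// down.
+#[cfg(test)]
+mod limit_sketch {
+    use crate::BufMut;
+
+    #[test]
+    fn caps_writable_bytes() {
+        let mut data = [0u8; 8];
+        let mut limited = (&mut data[..]).limit(3);
+        assert_eq!(limited.remaining_mut(), 3);
+        limited.put_slice(b"abc"); // exactly the limit
+        assert_eq!(limited.remaining_mut(), 0);
+    }
+}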
+
//! Utilities for working with buffers.
+//!
+//! A buffer is any structure that contains a sequence of bytes. The bytes may
+//! or may not be stored in contiguous memory. This module contains traits used
+//! to abstract over buffers as well as utilities for working with buffer types.
+//!
+//! # `Buf`, `BufMut`
+//!
+//! These are the two foundational traits for abstractly working with buffers.
+//! They can be thought of as iterators for byte structures. They offer additional
+//! performance over `Iterator` by providing an API optimized for byte slices.
+//!
+//! See [`Buf`] and [`BufMut`] for more details.
+//!
+//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
+
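+// Illustrative sketch (hypothetical): the docs above liken `Buf` to an
+// iterator over bytes; `get_u8` plays the role of `next` and
+// `copy_to_slice` is the bulk counterpart.
+#[cfg(test)]
+mod module_doc_sketch {
+    use super::Buf;
+
+    #[test]
+    fn buf_reads_like_an_iterator() {
+        let mut buf = &b"abc"[..];
+        assert_eq!(buf.get_u8(), b'a'); // one byte at a time, like `next()`
+        let mut rest = [0u8; 2];
+        buf.copy_to_slice(&mut rest); // bulk read of the remainder
+        assert_eq!(&rest, b"bc");
+    }
+}
+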
+mod buf_impl;
+mod buf_mut;
+mod chain;
+mod iter;
+mod limit;
+#[cfg(feature = "std")]
+mod reader;
+mod take;
+mod uninit_slice;
+mod vec_deque;
+#[cfg(feature = "std")]
+mod writer;
+
+pub use self::buf_impl::Buf;
+pub use self::buf_mut::BufMut;
+pub use self::chain::Chain;
+pub use self::iter::IntoIter;
+pub use self::limit::Limit;
+pub use self::take::Take;
+pub use self::uninit_slice::UninitSlice;
+
+#[cfg(feature = "std")]
+pub use self::{reader::Reader, writer::Writer};
+
use crate::Buf;
+
+use std::{cmp, io};
+
+/// A `Buf` adapter which implements `io::Read` for the inner value.
+///
+/// This struct is generally created by calling `reader()` on `Buf`. See
+/// documentation of [`reader()`](Buf::reader) for more
+/// details.
+#[derive(Debug)]
+pub struct Reader<B> {
+ buf: B,
+}
+
+pub fn new<B>(buf: B) -> Reader<B> {
+ Reader { buf }
+}
+
+impl<B: Buf> Reader<B> {
+ /// Gets a reference to the underlying `Buf`.
+ ///
+ /// It is inadvisable to directly read from the underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::Buf;
+ ///
+ /// let buf = b"hello world".reader();
+ ///
+ /// assert_eq!(b"hello world", buf.get_ref());
+ /// ```
+ pub fn get_ref(&self) -> &B {
+ &self.buf
+ }
+
+ /// Gets a mutable reference to the underlying `Buf`.
+ ///
+ /// It is inadvisable to directly read from the underlying `Buf`.
+ pub fn get_mut(&mut self) -> &mut B {
+ &mut self.buf
+ }
+
+ /// Consumes this `Reader`, returning the underlying value.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::Buf;
+ /// use std::io;
+ ///
+ /// let mut buf = b"hello world".reader();
+ /// let mut dst = vec![];
+ ///
+ /// io::copy(&mut buf, &mut dst).unwrap();
+ ///
+ /// let buf = buf.into_inner();
+ /// assert_eq!(0, buf.remaining());
+ /// ```
+ pub fn into_inner(self) -> B {
+ self.buf
+ }
+}
+
+impl<B: Buf + Sized> io::Read for Reader<B> {
+ fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
+ let len = cmp::min(self.buf.remaining(), dst.len());
+
+ Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]);
+ Ok(len)
+ }
+}
+
+impl<B: Buf + Sized> io::BufRead for Reader<B> {
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ Ok(self.buf.chunk())
+ }
+ fn consume(&mut self, amt: usize) {
+ self.buf.advance(amt)
+ }
+}
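+
+// Illustrative sketch (hypothetical): `read` above is infallible and copies
+// at most `dst.len()` bytes per call, so a small destination yields a
+// sequence of short reads.
+#[cfg(test)]
+mod reader_sketch {
+    use crate::Buf;
+    use std::io::Read;
+
+    #[test]
+    fn reads_in_bounded_steps() {
+        let mut reader = (&b"hello"[..]).reader();
+        let mut dst = [0u8; 2];
+        assert_eq!(reader.read(&mut dst).unwrap(), 2);
+        assert_eq!(&dst, b"he");
+        assert_eq!(reader.read(&mut dst).unwrap(), 2);
+        assert_eq!(&dst, b"ll");
+    }
+}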
+
use crate::{Buf, Bytes};
+
+use core::cmp;
+
+/// A `Buf` adapter which limits the bytes read from an underlying buffer.
+///
+/// This struct is generally created by calling `take()` on `Buf`. See
+/// documentation of [`take()`](Buf::take) for more details.
+#[derive(Debug)]
+pub struct Take<T> {
+ inner: T,
+ limit: usize,
+}
+
+pub fn new<T>(inner: T, limit: usize) -> Take<T> {
+ Take { inner, limit }
+}
+
+impl<T> Take<T> {
+ /// Consumes this `Take`, returning the underlying value.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::{Buf, BufMut};
+ ///
+ /// let mut buf = b"hello world".take(2);
+ /// let mut dst = vec![];
+ ///
+ /// dst.put(&mut buf);
+ /// assert_eq!(*dst, b"he"[..]);
+ ///
+ /// let mut buf = buf.into_inner();
+ ///
+ /// dst.clear();
+ /// dst.put(&mut buf);
+ /// assert_eq!(*dst, b"llo world"[..]);
+ /// ```
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+
+ /// Gets a reference to the underlying `Buf`.
+ ///
+ /// It is inadvisable to directly read from the underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::Buf;
+ ///
+ /// let buf = b"hello world".take(2);
+ ///
+ /// assert_eq!(11, buf.get_ref().remaining());
+ /// ```
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying `Buf`.
+ ///
+ /// It is inadvisable to directly read from the underlying `Buf`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::{Buf, BufMut};
+ ///
+ /// let mut buf = b"hello world".take(2);
+ /// let mut dst = vec![];
+ ///
+ /// buf.get_mut().advance(2);
+ ///
+ /// dst.put(&mut buf);
+ /// assert_eq!(*dst, b"ll"[..]);
+ /// ```
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ /// Returns the maximum number of bytes that can be read.
+ ///
+ /// # Note
+ ///
+ /// If the inner `Buf` has fewer bytes than indicated by this method then
+ /// that is the actual number of available bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::Buf;
+ ///
+ /// let mut buf = b"hello world".take(2);
+ ///
+ /// assert_eq!(2, buf.limit());
+ /// assert_eq!(b'h', buf.get_u8());
+ /// assert_eq!(1, buf.limit());
+ /// ```
+ pub fn limit(&self) -> usize {
+ self.limit
+ }
+
+ /// Sets the maximum number of bytes that can be read.
+ ///
+ /// # Note
+ ///
+ /// If the inner `Buf` has fewer bytes than `lim` then that is the actual
+ /// number of available bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::{Buf, BufMut};
+ ///
+ /// let mut buf = b"hello world".take(2);
+ /// let mut dst = vec![];
+ ///
+ /// dst.put(&mut buf);
+ /// assert_eq!(*dst, b"he"[..]);
+ ///
+ /// dst.clear();
+ ///
+ /// buf.set_limit(3);
+ /// dst.put(&mut buf);
+ /// assert_eq!(*dst, b"llo"[..]);
+ /// ```
+ pub fn set_limit(&mut self, lim: usize) {
+ self.limit = lim
+ }
+}
+
+impl<T: Buf> Buf for Take<T> {
+ fn remaining(&self) -> usize {
+ cmp::min(self.inner.remaining(), self.limit)
+ }
+
+ fn chunk(&self) -> &[u8] {
+ let bytes = self.inner.chunk();
+ &bytes[..cmp::min(bytes.len(), self.limit)]
+ }
+
+ fn advance(&mut self, cnt: usize) {
+ assert!(cnt <= self.limit);
+ self.inner.advance(cnt);
+ self.limit -= cnt;
+ }
+
+ fn copy_to_bytes(&mut self, len: usize) -> Bytes {
+ assert!(len <= self.remaining(), "`len` greater than remaining");
+
+ let r = self.inner.copy_to_bytes(len);
+ self.limit -= len;
+ r
+ }
+}
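+
+// Illustrative sketch (hypothetical): `advance` above draws down the limit,
+// so a `Take` always knows how much of its budget is left.
+#[cfg(test)]
+mod take_sketch {
+    use crate::Buf;
+
+    #[test]
+    fn limit_shrinks_with_reads() {
+        let mut take = (&b"hello"[..]).take(3);
+        assert_eq!(take.remaining(), 3);
+        take.advance(2);
+        assert_eq!(take.limit(), 1);
+        assert_eq!(take.chunk(), b"l");
+    }
+}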
+
use core::fmt;
+use core::mem::MaybeUninit;
+use core::ops::{
+ Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive,
+};
+
+/// Uninitialized byte slice.
+///
+/// Returned by `BufMut::chunk_mut()`, the referenced byte slice may be
+/// uninitialized. The wrapper provides safe access without introducing
+/// undefined behavior.
+///
+/// The safety invariants of this wrapper are:
+///
+/// 1. Reading from an `UninitSlice` is undefined behavior.
+/// 2. Writing uninitialized bytes to an `UninitSlice` is undefined behavior.
+///
+/// The difference between `&mut UninitSlice` and `&mut [MaybeUninit<u8>]` is
+/// that it is possible in safe code to write uninitialized bytes to an
+/// `&mut [MaybeUninit<u8>]`, which this type prohibits.
+#[repr(transparent)]
+pub struct UninitSlice([MaybeUninit<u8>]);
+
+impl UninitSlice {
+ /// Creates a `&mut UninitSlice` wrapping a slice of initialised memory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::buf::UninitSlice;
+ ///
+ /// let mut buffer = [0u8; 64];
+ /// let slice = UninitSlice::new(&mut buffer[..]);
+ /// ```
+ #[inline]
+ pub fn new(slice: &mut [u8]) -> &mut UninitSlice {
+ unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+ }
+
+ /// Creates a `&mut UninitSlice` wrapping a slice of uninitialised memory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::buf::UninitSlice;
+ /// use core::mem::MaybeUninit;
+ ///
+ /// let mut buffer = [MaybeUninit::uninit(); 64];
+ /// let slice = UninitSlice::uninit(&mut buffer[..]);
+ ///
+ /// let mut vec = Vec::with_capacity(1024);
+ /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into();
+ /// ```
+ #[inline]
+ pub fn uninit(slice: &mut [MaybeUninit<u8>]) -> &mut UninitSlice {
+ unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+ }
+
+ fn uninit_ref(slice: &[MaybeUninit<u8>]) -> &UninitSlice {
+ unsafe { &*(slice as *const [MaybeUninit<u8>] as *const UninitSlice) }
+ }
+
+ /// Create a `&mut UninitSlice` from a pointer and a length.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` references a valid memory region owned
+ /// by the caller representing a byte slice for the duration of `'a`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::buf::UninitSlice;
+ ///
+ /// let bytes = b"hello world".to_vec();
+ /// let ptr = bytes.as_ptr() as *mut _;
+ /// let len = bytes.len();
+ ///
+ /// let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) };
+ /// ```
+ #[inline]
+ pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice {
+ let maybe_init: &mut [MaybeUninit<u8>] =
+ core::slice::from_raw_parts_mut(ptr as *mut _, len);
+ Self::uninit(maybe_init)
+ }
+
+ /// Write a single byte at the specified offset.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::buf::UninitSlice;
+ ///
+ /// let mut data = [b'f', b'o', b'o'];
+ /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };
+ ///
+ /// slice.write_byte(0, b'b');
+ ///
+ /// assert_eq!(b"boo", &data[..]);
+ /// ```
+ #[inline]
+ pub fn write_byte(&mut self, index: usize, byte: u8) {
+ assert!(index < self.len());
+
+ unsafe { self[index..].as_mut_ptr().write(byte) }
+ }
+
+ /// Copies bytes from `src` into `self`.
+ ///
+ /// The length of `src` must be the same as `self`.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `src` has a different length than `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::buf::UninitSlice;
+ ///
+ /// let mut data = [b'f', b'o', b'o'];
+ /// let slice = unsafe { UninitSlice::from_raw_parts_mut(data.as_mut_ptr(), 3) };
+ ///
+ /// slice.copy_from_slice(b"bar");
+ ///
+ /// assert_eq!(b"bar", &data[..]);
+ /// ```
+ #[inline]
+ pub fn copy_from_slice(&mut self, src: &[u8]) {
+ use core::ptr;
+
+ assert_eq!(self.len(), src.len());
+
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
+ }
+ }
+
+ /// Return a raw pointer to the slice's buffer.
+ ///
+ /// # Safety
+ ///
+ /// The caller **must not** read from the referenced memory and **must not**
+ /// write **uninitialized** bytes to the slice either.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut data = [0, 1, 2];
+ /// let mut slice = &mut data[..];
+ /// let ptr = BufMut::chunk_mut(&mut slice).as_mut_ptr();
+ /// ```
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut u8 {
+ self.0.as_mut_ptr() as *mut _
+ }
+
+ /// Return a `&mut [MaybeUninit<u8>]` to this slice's buffer.
+ ///
+ /// # Safety
+ ///
+ /// The caller **must not** read from the referenced memory and **must not** write
+ /// **uninitialized** bytes to the slice either. This is because the `BufMut` implementation
+ /// that created the `UninitSlice` knows which parts are initialized. Writing uninitialized
+ /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined
+ /// behavior.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut data = [0, 1, 2];
+ /// let mut slice = &mut data[..];
+ /// unsafe {
+ /// let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut();
+ /// };
+ /// ```
+ #[inline]
+ pub unsafe fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ &mut self.0
+ }
+
+ /// Returns the number of bytes in the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut data = [0, 1, 2];
+ /// let mut slice = &mut data[..];
+ /// let len = BufMut::chunk_mut(&mut slice).len();
+ ///
+ /// assert_eq!(len, 3);
+ /// ```
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.0.len()
+ }
+}
+
+impl fmt::Debug for UninitSlice {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("UninitSlice[...]").finish()
+ }
+}
+
+impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice {
+ fn from(slice: &'a mut [u8]) -> Self {
+ UninitSlice::new(slice)
+ }
+}
+
+impl<'a> From<&'a mut [MaybeUninit<u8>]> for &'a mut UninitSlice {
+ fn from(slice: &'a mut [MaybeUninit<u8>]) -> Self {
+ UninitSlice::uninit(slice)
+ }
+}
+
+macro_rules! impl_index {
+ ($($t:ty),*) => {
+ $(
+ impl Index<$t> for UninitSlice {
+ type Output = UninitSlice;
+
+ #[inline]
+ fn index(&self, index: $t) -> &UninitSlice {
+ UninitSlice::uninit_ref(&self.0[index])
+ }
+ }
+
+ impl IndexMut<$t> for UninitSlice {
+ #[inline]
+ fn index_mut(&mut self, index: $t) -> &mut UninitSlice {
+ UninitSlice::uninit(&mut self.0[index])
+ }
+ }
+ )*
+ };
+}
+
+impl_index!(
+ Range<usize>,
+ RangeFrom<usize>,
+ RangeFull,
+ RangeInclusive<usize>,
+ RangeTo<usize>,
+ RangeToInclusive<usize>
+);
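+
+// Illustrative sketch (hypothetical): the range impls generated above let
+// callers narrow an `UninitSlice` without any `unsafe` code.
+#[cfg(test)]
+mod uninit_slice_sketch {
+    use super::UninitSlice;
+
+    #[test]
+    fn narrows_with_range_indexing() {
+        let mut data = [0u8; 8];
+        let slice = UninitSlice::new(&mut data[..]);
+        let sub = &mut slice[2..5]; // Index/IndexMut from the macro above
+        assert_eq!(sub.len(), 3);
+        sub.copy_from_slice(b"abc");
+        assert_eq!(&data[2..5], b"abc");
+    }
+}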
+
use alloc::collections::VecDeque;
+
+use super::Buf;
+
+impl Buf for VecDeque<u8> {
+ fn remaining(&self) -> usize {
+ self.len()
+ }
+
+ fn chunk(&self) -> &[u8] {
+ let (s1, s2) = self.as_slices();
+ if s1.is_empty() {
+ s2
+ } else {
+ s1
+ }
+ }
+
+ fn advance(&mut self, cnt: usize) {
+ self.drain(..cnt);
+ }
+}
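+
+// Illustrative sketch (hypothetical): a deque whose contents wrap around the
+// ring is not contiguous, so `chunk` above may expose only the front slice;
+// bulk helpers such as `copy_to_slice` still read across the wrap point.
+#[cfg(test)]
+mod vec_deque_sketch {
+    use super::Buf;
+    use alloc::collections::VecDeque;
+
+    #[test]
+    fn reads_across_the_wrap_point() {
+        let mut deque: VecDeque<u8> = VecDeque::with_capacity(4);
+        deque.extend(b"abcd".iter().copied());
+        deque.drain(..2); // advance the head
+        deque.extend(b"ef".iter().copied()); // may wrap, depending on capacity
+        let mut dst = [0u8; 4];
+        deque.copy_to_slice(&mut dst);
+        assert_eq!(&dst, b"cdef");
+    }
+}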
+
use crate::BufMut;
+
+use std::{cmp, io};
+
+/// A `BufMut` adapter which implements `io::Write` for the inner value.
+///
+/// This struct is generally created by calling `writer()` on `BufMut`. See
+/// documentation of [`writer()`](BufMut::writer) for more
+/// details.
+#[derive(Debug)]
+pub struct Writer<B> {
+ buf: B,
+}
+
+pub fn new<B>(buf: B) -> Writer<B> {
+ Writer { buf }
+}
+
+impl<B: BufMut> Writer<B> {
+ /// Gets a reference to the underlying `BufMut`.
+ ///
+ /// It is inadvisable to directly write to the underlying `BufMut`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::BufMut;
+ ///
+ /// let buf = Vec::with_capacity(1024).writer();
+ ///
+ /// assert_eq!(1024, buf.get_ref().capacity());
+ /// ```
+ pub fn get_ref(&self) -> &B {
+ &self.buf
+ }
+
+ /// Gets a mutable reference to the underlying `BufMut`.
+ ///
+ /// It is inadvisable to directly write to the underlying `BufMut`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![].writer();
+ ///
+ /// buf.get_mut().reserve(1024);
+ ///
+ /// assert_eq!(1024, buf.get_ref().capacity());
+ /// ```
+ pub fn get_mut(&mut self) -> &mut B {
+ &mut self.buf
+ }
+
+ /// Consumes this `Writer`, returning the underlying value.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bytes::BufMut;
+ /// use std::io;
+ ///
+ /// let mut buf = vec![].writer();
+ /// let mut src = &b"hello world"[..];
+ ///
+ /// io::copy(&mut src, &mut buf).unwrap();
+ ///
+ /// let buf = buf.into_inner();
+ /// assert_eq!(*buf, b"hello world"[..]);
+ /// ```
+ pub fn into_inner(self) -> B {
+ self.buf
+ }
+}
+
+impl<B: BufMut + Sized> io::Write for Writer<B> {
+ fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+ let n = cmp::min(self.buf.remaining_mut(), src.len());
+
+ self.buf.put(&src[0..n]);
+ Ok(n)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
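+
+// Illustrative sketch (hypothetical): `write` above is clamped by
+// `remaining_mut`, so a fixed-size target reports a short write rather than
+// an error.
+#[cfg(test)]
+mod writer_sketch {
+    use crate::BufMut;
+    use std::io::Write;
+
+    #[test]
+    fn short_write_at_capacity() {
+        let mut data = [0u8; 3];
+        let mut writer = (&mut data[..]).writer();
+        let n = writer.write(b"hello").unwrap();
+        assert_eq!(n, 3); // only three bytes fit
+        assert_eq!(&data, b"hel");
+    }
+}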
+
use core::iter::FromIterator;
+use core::ops::{Deref, RangeBounds};
+use core::{cmp, fmt, hash, mem, ptr, slice, usize};
+
+use alloc::{
+ alloc::{dealloc, Layout},
+ borrow::Borrow,
+ boxed::Box,
+ string::String,
+ vec::Vec,
+};
+
+use crate::buf::IntoIter;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+use crate::Buf;
+
+/// A cheaply cloneable and sliceable chunk of contiguous memory.
+///
+/// `Bytes` is an efficient container for storing and operating on contiguous
+/// slices of memory. It is intended for use primarily in networking code, but
+/// could have applications elsewhere as well.
+///
+/// `Bytes` values facilitate zero-copy network programming by allowing multiple
+/// `Bytes` objects to point to the same underlying memory.
+///
+/// `Bytes` does not have a single implementation. It is an interface, whose
+/// exact behavior is implemented through dynamic dispatch in several underlying
+/// implementations of `Bytes`.
+///
+/// All `Bytes` implementations must fulfill the following requirements:
+/// - They are cheaply cloneable and thereby shareable between an unlimited number
+///   of components, for example by modifying a reference count.
+/// - Instances can be sliced to refer to a subset of the original buffer.
+///
+/// ```
+/// use bytes::Bytes;
+///
+/// let mut mem = Bytes::from("Hello world");
+/// let a = mem.slice(0..5);
+///
+/// assert_eq!(a, "Hello");
+///
+/// let b = mem.split_to(6);
+///
+/// assert_eq!(mem, "world");
+/// assert_eq!(b, "Hello ");
+/// ```
+///
+/// # Memory layout
+///
+/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
+/// to track information about which segment of the underlying memory the
+/// `Bytes` handle has access to.
+///
+/// `Bytes` keeps both a pointer to the shared state containing the full memory
+/// slice and a pointer to the start of the region visible by the handle.
+/// `Bytes` also tracks the length of its view into the memory.
+///
+/// # Sharing
+///
+/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
+/// how sharing/cloning is implemented in detail.
+/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
+/// cloning the backing storage in order to share it behind multiple `Bytes`
+/// instances.
+///
+/// For `Bytes` implementations which refer to constant memory (e.g. created
+/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
+///
+/// For `Bytes` implementations which point to a reference counted shared storage
+/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
+/// reference count.
+///
+/// Due to this mechanism, multiple `Bytes` instances may point to the same
+/// shared memory region.
+/// Each `Bytes` instance can point to different sections within that
+/// memory region, and `Bytes` instances may or may not have overlapping views
+/// into the memory.
+///
+/// The following diagram visualizes a scenario where 2 `Bytes` instances make
+/// use of an `Arc`-based backing storage, and provide access to different views:
+///
+/// ```text
+///
+///        Arc ptrs                   ┌─────────┐
+///        ________________________ / │ Bytes 2 │
+///       /                           └─────────┘
+///      /          ┌───────────┐     |         |
+///     |_________/ │  Bytes 1  │     |         |
+///     |           └───────────┘     |         |
+///     |           |           | ___/ data     | tail
+///     |      data |      tail |/              |
+///     v           v           v               v
+/// ┌─────┬─────┬───────────┬───────────────┬─────┐
+/// │ Arc │     │           │               │     │
+/// └─────┴─────┴───────────┴───────────────┴─────┘
+/// ```
+pub struct Bytes {
+ ptr: *const u8,
+ len: usize,
+ // inlined "trait object"
+ data: AtomicPtr<()>,
+ vtable: &'static Vtable,
+}
+
+pub(crate) struct Vtable {
+ /// fn(data, ptr, len)
+ pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
+ /// fn(data, ptr, len)
+ ///
+ /// takes `Bytes` to value
+ pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
+ /// fn(data)
+ pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
+ /// fn(data, ptr, len)
+ pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
+}
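+
+// Illustrative sketch (hypothetical): as described above, cloning and slicing
+// only adjust the view; both handles refer to the same backing storage.
+#[cfg(test)]
+mod bytes_view_sketch {
+    use super::Bytes;
+
+    #[test]
+    fn slicing_shares_storage() {
+        let a = Bytes::from_static(b"hello world");
+        let b = a.slice(6..); // O(1): no copy of the payload
+        assert_eq!(&b[..], b"world");
+        assert_eq!(&a[..], b"hello world"); // original view unchanged
+    }
+}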
+
+impl Bytes {
+ /// Creates a new empty `Bytes`.
+ ///
+ /// This will not allocate and the returned `Bytes` handle will be empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let b = Bytes::new();
+ /// assert_eq!(&b[..], b"");
+ /// ```
+ #[inline]
+ #[cfg(not(all(loom, test)))]
+ pub const fn new() -> Self {
+ // Make it a named const to work around
+ // "unsizing casts are not allowed in const fn"
+ const EMPTY: &[u8] = &[];
+ Bytes::from_static(EMPTY)
+ }
+
+ #[cfg(all(loom, test))]
+ pub fn new() -> Self {
+ const EMPTY: &[u8] = &[];
+ Bytes::from_static(EMPTY)
+ }
+
+ /// Creates a new `Bytes` from a static slice.
+ ///
+ /// The returned `Bytes` will point directly to the static slice. There is
+ /// no allocating or copying.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let b = Bytes::from_static(b"hello");
+ /// assert_eq!(&b[..], b"hello");
+ /// ```
+ #[inline]
+ #[cfg(not(all(loom, test)))]
+ pub const fn from_static(bytes: &'static [u8]) -> Self {
+ Bytes {
+ ptr: bytes.as_ptr(),
+ len: bytes.len(),
+ data: AtomicPtr::new(ptr::null_mut()),
+ vtable: &STATIC_VTABLE,
+ }
+ }
+
+ #[cfg(all(loom, test))]
+ pub fn from_static(bytes: &'static [u8]) -> Self {
+ Bytes {
+ ptr: bytes.as_ptr(),
+ len: bytes.len(),
+ data: AtomicPtr::new(ptr::null_mut()),
+ vtable: &STATIC_VTABLE,
+ }
+ }
+
+ /// Returns the number of bytes contained in this `Bytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let b = Bytes::from(&b"hello"[..]);
+ /// assert_eq!(b.len(), 5);
+ /// ```
+ #[inline]
+ pub const fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns true if the `Bytes` has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let b = Bytes::new();
+ /// assert!(b.is_empty());
+ /// ```
+ #[inline]
+ pub const fn is_empty(&self) -> bool {
+ self.len == 0
+ }
+
+ /// Returns true if this is the only reference to the data.
+ ///
+ /// Always returns false if the data is backed by a static slice.
+ ///
+ /// The result of this method may be invalidated immediately if another
+ /// thread clones this value while this is being called. Ensure you have
+ /// unique access to this value (`&mut Bytes`) first if you need to be
+    /// certain the result is valid (i.e. for safety reasons).
+    ///
+    /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let a = Bytes::from(vec![1, 2, 3]);
+ /// assert!(a.is_unique());
+ /// let b = a.clone();
+ /// assert!(!a.is_unique());
+ /// ```
+ pub fn is_unique(&self) -> bool {
+ unsafe { (self.vtable.is_unique)(&self.data) }
+ }
+
+    /// Creates a new `Bytes` instance from a slice, by copying it.
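+    ///
+    /// # Examples
+    ///
+    /// A short illustrative example:
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let b = Bytes::copy_from_slice(b"hello");
+    /// assert_eq!(&b[..], b"hello");
+    /// ```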
+ pub fn copy_from_slice(data: &[u8]) -> Self {
+ data.to_vec().into()
+ }
+
+ /// Returns a slice of self for the provided range.
+ ///
+ /// This will increment the reference count for the underlying memory and
+ /// return a new `Bytes` handle set to the slice.
+ ///
+ /// This operation is `O(1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let a = Bytes::from(&b"hello world"[..]);
+ /// let b = a.slice(2..5);
+ ///
+ /// assert_eq!(&b[..], b"llo");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
+ /// will panic.
+ pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
+ use core::ops::Bound;
+
+ let len = self.len();
+
+ let begin = match range.start_bound() {
+ Bound::Included(&n) => n,
+ Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
+ Bound::Unbounded => 0,
+ };
+
+ let end = match range.end_bound() {
+ Bound::Included(&n) => n.checked_add(1).expect("out of range"),
+ Bound::Excluded(&n) => n,
+ Bound::Unbounded => len,
+ };
+
+ assert!(
+ begin <= end,
+ "range start must not be greater than end: {:?} <= {:?}",
+ begin,
+ end,
+ );
+ assert!(
+ end <= len,
+ "range end out of bounds: {:?} <= {:?}",
+ end,
+ len,
+ );
+
+ if end == begin {
+ return Bytes::new();
+ }
+
+ let mut ret = self.clone();
+
+ ret.len = end - begin;
+ ret.ptr = unsafe { ret.ptr.add(begin) };
+
+ ret
+ }
+
+ /// Returns a slice of self that is equivalent to the given `subset`.
+ ///
+ /// When processing a `Bytes` buffer with other tools, one often gets a
+ /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
+ /// This function turns that `&[u8]` into another `Bytes`, as if one had
+ /// called `self.slice()` with the offsets that correspond to `subset`.
+ ///
+ /// This operation is `O(1)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let bytes = Bytes::from(&b"012345678"[..]);
+ /// let as_slice = bytes.as_ref();
+ /// let subset = &as_slice[2..6];
+ /// let subslice = bytes.slice_ref(&subset);
+ /// assert_eq!(&subslice[..], b"2345");
+ /// ```
+ ///
+ /// # Panics
+ ///
+    /// Requires that the given `subset` slice is in fact contained within the
+    /// `Bytes` buffer; otherwise this function will panic.
+ pub fn slice_ref(&self, subset: &[u8]) -> Self {
+        // An empty slice and an empty `Bytes` may have their pointers reset,
+        // so explicitly allow an empty slice to be a subslice of any slice.
+ if subset.is_empty() {
+ return Bytes::new();
+ }
+
+ let bytes_p = self.as_ptr() as usize;
+ let bytes_len = self.len();
+
+ let sub_p = subset.as_ptr() as usize;
+ let sub_len = subset.len();
+
+ assert!(
+ sub_p >= bytes_p,
+ "subset pointer ({:p}) is smaller than self pointer ({:p})",
+ subset.as_ptr(),
+ self.as_ptr(),
+ );
+ assert!(
+ sub_p + sub_len <= bytes_p + bytes_len,
+ "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
+ self.as_ptr(),
+ bytes_len,
+ subset.as_ptr(),
+ sub_len,
+ );
+
+ let sub_offset = sub_p - bytes_p;
+
+ self.slice(sub_offset..(sub_offset + sub_len))
+ }
+
+ /// Splits the bytes into two at the given index.
+ ///
+ /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
+ /// contains elements `[at, len)`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count and
+ /// sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let mut a = Bytes::from(&b"hello world"[..]);
+ /// let b = a.split_off(5);
+ ///
+ /// assert_eq!(&a[..], b"hello");
+ /// assert_eq!(&b[..], b" world");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ #[must_use = "consider Bytes::truncate if you don't need the other half"]
+ pub fn split_off(&mut self, at: usize) -> Self {
+ assert!(
+ at <= self.len(),
+ "split_off out of bounds: {:?} <= {:?}",
+ at,
+ self.len(),
+ );
+
+ if at == self.len() {
+ return Bytes::new();
+ }
+
+ if at == 0 {
+ return mem::replace(self, Bytes::new());
+ }
+
+ let mut ret = self.clone();
+
+ self.len = at;
+
+ unsafe { ret.inc_start(at) };
+
+ ret
+ }
+
+ /// Splits the bytes into two at the given index.
+ ///
+ /// Afterwards `self` contains elements `[at, len)`, and the returned
+ /// `Bytes` contains elements `[0, at)`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count and
+ /// sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let mut a = Bytes::from(&b"hello world"[..]);
+ /// let b = a.split_to(5);
+ ///
+ /// assert_eq!(&a[..], b" world");
+ /// assert_eq!(&b[..], b"hello");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ #[must_use = "consider Bytes::advance if you don't need the other half"]
+ pub fn split_to(&mut self, at: usize) -> Self {
+ assert!(
+ at <= self.len(),
+ "split_to out of bounds: {:?} <= {:?}",
+ at,
+ self.len(),
+ );
+
+ if at == self.len() {
+ return mem::replace(self, Bytes::new());
+ }
+
+ if at == 0 {
+ return Bytes::new();
+ }
+
+ let mut ret = self.clone();
+
+ unsafe { self.inc_start(at) };
+
+ ret.len = at;
+ ret
+ }
+
+ /// Shortens the buffer, keeping the first `len` bytes and dropping the
+ /// rest.
+ ///
+ /// If `len` is greater than the buffer's current length, this has no
+ /// effect.
+ ///
+ /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
+ /// excess bytes to be returned instead of dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let mut buf = Bytes::from(&b"hello world"[..]);
+ /// buf.truncate(5);
+ /// assert_eq!(buf, b"hello"[..]);
+ /// ```
+ #[inline]
+ pub fn truncate(&mut self, len: usize) {
+ if len < self.len {
+ // The Vec "promotable" vtables do not store the capacity,
+ // so we cannot truncate while using this repr. We *have* to
+ // promote using `split_off` so the capacity can be stored.
+ if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
+ || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
+ {
+ drop(self.split_off(len));
+ } else {
+ self.len = len;
+ }
+ }
+ }
+
+ /// Clears the buffer, removing all data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Bytes;
+ ///
+ /// let mut buf = Bytes::from(&b"hello world"[..]);
+ /// buf.clear();
+ /// assert!(buf.is_empty());
+ /// ```
+ #[inline]
+ pub fn clear(&mut self) {
+ self.truncate(0);
+ }
+
+ #[inline]
+ pub(crate) unsafe fn with_vtable(
+ ptr: *const u8,
+ len: usize,
+ data: AtomicPtr<()>,
+ vtable: &'static Vtable,
+ ) -> Bytes {
+ Bytes {
+ ptr,
+ len,
+ data,
+ vtable,
+ }
+ }
+
+ // private
+
+ #[inline]
+ fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.ptr, self.len) }
+ }
+
+ #[inline]
+ unsafe fn inc_start(&mut self, by: usize) {
+ // should already be asserted, but debug assert for tests
+ debug_assert!(self.len >= by, "internal: inc_start out of bounds");
+ self.len -= by;
+ self.ptr = self.ptr.add(by);
+ }
+}
+
+// Vtable must enforce this behavior
+unsafe impl Send for Bytes {}
+unsafe impl Sync for Bytes {}
+
+impl Drop for Bytes {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
+ }
+}
+
+impl Clone for Bytes {
+ #[inline]
+ fn clone(&self) -> Bytes {
+ unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
+ }
+}
+
+impl Buf for Bytes {
+ #[inline]
+ fn remaining(&self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ self.as_slice()
+ }
+
+ #[inline]
+ fn advance(&mut self, cnt: usize) {
+ assert!(
+ cnt <= self.len(),
+ "cannot advance past `remaining`: {:?} <= {:?}",
+ cnt,
+ self.len(),
+ );
+
+ unsafe {
+ self.inc_start(cnt);
+ }
+ }
+
+ fn copy_to_bytes(&mut self, len: usize) -> Self {
+ if len == self.remaining() {
+ core::mem::replace(self, Bytes::new())
+ } else {
+ let ret = self.slice(..len);
+ self.advance(len);
+ ret
+ }
+ }
+}
+
+impl Deref for Bytes {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ self.as_slice()
+ }
+}
+
+impl AsRef<[u8]> for Bytes {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.as_slice()
+ }
+}
+
+impl hash::Hash for Bytes {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: hash::Hasher,
+ {
+ self.as_slice().hash(state);
+ }
+}
+
+impl Borrow<[u8]> for Bytes {
+ fn borrow(&self) -> &[u8] {
+ self.as_slice()
+ }
+}
+
+impl IntoIterator for Bytes {
+ type Item = u8;
+ type IntoIter = IntoIter<Bytes>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self)
+ }
+}
+
+impl<'a> IntoIterator for &'a Bytes {
+ type Item = &'a u8;
+ type IntoIter = core::slice::Iter<'a, u8>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_slice().iter()
+ }
+}
+
+impl FromIterator<u8> for Bytes {
+ fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+ Vec::from_iter(into_iter).into()
+ }
+}
+
+// impl Eq
+
+impl PartialEq for Bytes {
+ fn eq(&self, other: &Bytes) -> bool {
+ self.as_slice() == other.as_slice()
+ }
+}
+
+impl PartialOrd for Bytes {
+ fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+ self.as_slice().partial_cmp(other.as_slice())
+ }
+}
+
+impl Ord for Bytes {
+ fn cmp(&self, other: &Bytes) -> cmp::Ordering {
+ self.as_slice().cmp(other.as_slice())
+ }
+}
+
+impl Eq for Bytes {}
+
+impl PartialEq<[u8]> for Bytes {
+ fn eq(&self, other: &[u8]) -> bool {
+ self.as_slice() == other
+ }
+}
+
+impl PartialOrd<[u8]> for Bytes {
+ fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
+ self.as_slice().partial_cmp(other)
+ }
+}
+
+impl PartialEq<Bytes> for [u8] {
+ fn eq(&self, other: &Bytes) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<Bytes> for [u8] {
+ fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<str> for Bytes {
+ fn eq(&self, other: &str) -> bool {
+ self.as_slice() == other.as_bytes()
+ }
+}
+
+impl PartialOrd<str> for Bytes {
+ fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+ self.as_slice().partial_cmp(other.as_bytes())
+ }
+}
+
+impl PartialEq<Bytes> for str {
+ fn eq(&self, other: &Bytes) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<Bytes> for str {
+ fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl PartialEq<Vec<u8>> for Bytes {
+ fn eq(&self, other: &Vec<u8>) -> bool {
+ *self == other[..]
+ }
+}
+
+impl PartialOrd<Vec<u8>> for Bytes {
+ fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
+ self.as_slice().partial_cmp(&other[..])
+ }
+}
+
+impl PartialEq<Bytes> for Vec<u8> {
+ fn eq(&self, other: &Bytes) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<Bytes> for Vec<u8> {
+ fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<String> for Bytes {
+ fn eq(&self, other: &String) -> bool {
+ *self == other[..]
+ }
+}
+
+impl PartialOrd<String> for Bytes {
+ fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
+ self.as_slice().partial_cmp(other.as_bytes())
+ }
+}
+
+impl PartialEq<Bytes> for String {
+ fn eq(&self, other: &Bytes) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<Bytes> for String {
+ fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl PartialEq<Bytes> for &[u8] {
+ fn eq(&self, other: &Bytes) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<Bytes> for &[u8] {
+ fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<Bytes> for &str {
+ fn eq(&self, other: &Bytes) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<Bytes> for &str {
+ fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
+where
+ Bytes: PartialEq<T>,
+{
+ fn eq(&self, other: &&'a T) -> bool {
+ *self == **other
+ }
+}
+
+impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
+where
+ Bytes: PartialOrd<T>,
+{
+ fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
+ self.partial_cmp(&**other)
+ }
+}
+
+// impl From
+
+impl Default for Bytes {
+ #[inline]
+ fn default() -> Bytes {
+ Bytes::new()
+ }
+}
+
+impl From<&'static [u8]> for Bytes {
+ fn from(slice: &'static [u8]) -> Bytes {
+ Bytes::from_static(slice)
+ }
+}
+
+impl From<&'static str> for Bytes {
+ fn from(slice: &'static str) -> Bytes {
+ Bytes::from_static(slice.as_bytes())
+ }
+}
+
+impl From<Vec<u8>> for Bytes {
+ fn from(mut vec: Vec<u8>) -> Bytes {
+ let ptr = vec.as_mut_ptr();
+ let len = vec.len();
+ let cap = vec.capacity();
+
+ // Avoid an extra allocation if possible.
+ if len == cap {
+ return Bytes::from(vec.into_boxed_slice());
+ }
+
+ let shared = Box::new(Shared {
+ buf: ptr,
+ cap,
+ ref_cnt: AtomicUsize::new(1),
+ });
+ mem::forget(vec);
+
+ let shared = Box::into_raw(shared);
+ // The pointer should be aligned, so this assert should
+ // always succeed.
+ debug_assert!(
+ 0 == (shared as usize & KIND_MASK),
+ "internal: Box<Shared> should have an aligned pointer",
+ );
+ Bytes {
+ ptr,
+ len,
+ data: AtomicPtr::new(shared as _),
+ vtable: &SHARED_VTABLE,
+ }
+ }
+}
+
+impl From<Box<[u8]>> for Bytes {
+ fn from(slice: Box<[u8]>) -> Bytes {
+ // Box<[u8]> doesn't contain a heap allocation for empty slices,
+ // so the pointer isn't aligned enough for the KIND_VEC stashing to
+ // work.
+ if slice.is_empty() {
+ return Bytes::new();
+ }
+
+ let len = slice.len();
+ let ptr = Box::into_raw(slice) as *mut u8;
+
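+        // Even addresses have a spare LSB in which the KIND_VEC tag can be
+        // stashed; odd addresses cannot carry the tag, so they use a
+        // dedicated vtable that treats the untagged pointer as the buffer
+        // address.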
+ if ptr as usize & 0x1 == 0 {
+ let data = ptr_map(ptr, |addr| addr | KIND_VEC);
+ Bytes {
+ ptr,
+ len,
+ data: AtomicPtr::new(data.cast()),
+ vtable: &PROMOTABLE_EVEN_VTABLE,
+ }
+ } else {
+ Bytes {
+ ptr,
+ len,
+ data: AtomicPtr::new(ptr.cast()),
+ vtable: &PROMOTABLE_ODD_VTABLE,
+ }
+ }
+ }
+}
+
+impl From<String> for Bytes {
+ fn from(s: String) -> Bytes {
+ Bytes::from(s.into_bytes())
+ }
+}
+
+impl From<Bytes> for Vec<u8> {
+ fn from(bytes: Bytes) -> Vec<u8> {
+ let bytes = mem::ManuallyDrop::new(bytes);
+ unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
+ }
+}
+
+// ===== impl Vtable =====
+
+impl fmt::Debug for Vtable {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Vtable")
+ .field("clone", &(self.clone as *const ()))
+ .field("drop", &(self.drop as *const ()))
+ .finish()
+ }
+}
+
+// ===== impl StaticVtable =====
+
+const STATIC_VTABLE: Vtable = Vtable {
+ clone: static_clone,
+ to_vec: static_to_vec,
+ is_unique: static_is_unique,
+ drop: static_drop,
+};
+
+unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+ let slice = slice::from_raw_parts(ptr, len);
+ Bytes::from_static(slice)
+}
+
+unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ let slice = slice::from_raw_parts(ptr, len);
+ slice.to_vec()
+}
+
+fn static_is_unique(_: &AtomicPtr<()>) -> bool {
+ false
+}
+
+unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
+ // nothing to drop for &'static [u8]
+}
+
+// ===== impl PromotableVtable =====
+
+static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
+ clone: promotable_even_clone,
+ to_vec: promotable_even_to_vec,
+ is_unique: promotable_is_unique,
+ drop: promotable_even_drop,
+};
+
+static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
+ clone: promotable_odd_clone,
+ to_vec: promotable_odd_to_vec,
+ is_unique: promotable_is_unique,
+ drop: promotable_odd_drop,
+};
+
+unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+ let shared = data.load(Ordering::Acquire);
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ shallow_clone_arc(shared.cast(), ptr, len)
+ } else {
+ debug_assert_eq!(kind, KIND_VEC);
+ let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
+ shallow_clone_vec(data, shared, buf, ptr, len)
+ }
+}
+
+unsafe fn promotable_to_vec(
+ data: &AtomicPtr<()>,
+ ptr: *const u8,
+ len: usize,
+ f: fn(*mut ()) -> *mut u8,
+) -> Vec<u8> {
+ let shared = data.load(Ordering::Acquire);
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ shared_to_vec_impl(shared.cast(), ptr, len)
+ } else {
+ // If Bytes holds a Vec, then the offset must be 0.
+ debug_assert_eq!(kind, KIND_VEC);
+
+ let buf = f(shared);
+
+ let cap = (ptr as usize - buf as usize) + len;
+
+ // Copy back buffer
+ ptr::copy(ptr, buf, len);
+
+ Vec::from_raw_parts(buf, len, cap)
+ }
+}
+
+unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ promotable_to_vec(data, ptr, len, |shared| {
+ ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
+ })
+}
+
+unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
+ data.with_mut(|shared| {
+ let shared = *shared;
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ release_shared(shared.cast());
+ } else {
+ debug_assert_eq!(kind, KIND_VEC);
+ let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
+ free_boxed_slice(buf, ptr, len);
+ }
+ });
+}
+
+unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+ let shared = data.load(Ordering::Acquire);
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ shallow_clone_arc(shared as _, ptr, len)
+ } else {
+ debug_assert_eq!(kind, KIND_VEC);
+ shallow_clone_vec(data, shared, shared.cast(), ptr, len)
+ }
+}
+
+unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ promotable_to_vec(data, ptr, len, |shared| shared.cast())
+}
+
+unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
+ data.with_mut(|shared| {
+ let shared = *shared;
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ release_shared(shared.cast());
+ } else {
+ debug_assert_eq!(kind, KIND_VEC);
+
+ free_boxed_slice(shared.cast(), ptr, len);
+ }
+ });
+}
+
+unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
+ let shared = data.load(Ordering::Acquire);
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
+ ref_cnt == 1
+ } else {
+ true
+ }
+}
+
+unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
+ let cap = (offset as usize - buf as usize) + len;
+ dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
+}
+
+// ===== impl SharedVtable =====
+
+struct Shared {
+ // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
+ buf: *mut u8,
+ cap: usize,
+ ref_cnt: AtomicUsize,
+}
+
+impl Drop for Shared {
+ fn drop(&mut self) {
+ unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
+ }
+}
+
+// Assert that the alignment of `Shared` is divisible by 2.
+// This is a necessary invariant, since we depend on the pointer of an
+// allocated `Shared` object to implicitly carry the `KIND_ARC` flag:
+// the flag is set when the LSB is 0.
+const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
+
+static SHARED_VTABLE: Vtable = Vtable {
+ clone: shared_clone,
+ to_vec: shared_to_vec,
+ is_unique: shared_is_unique,
+ drop: shared_drop,
+};
+
+const KIND_ARC: usize = 0b0;
+const KIND_VEC: usize = 0b1;
+const KIND_MASK: usize = 0b1;
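+
+// Illustrative decoding of the tag (a sketch mirroring the constants above;
+// `data` is the raw pointer stored in `Bytes::data`):
+//
+//     match data as usize & KIND_MASK {
+//         KIND_ARC => { /* `data` is a valid `*mut Shared` */ }
+//         KIND_VEC => { /* low bit set: buffer is still an un-promoted Vec */ }
+//         _ => unreachable!(),
+//     }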
+
+unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+ let shared = data.load(Ordering::Relaxed);
+ shallow_clone_arc(shared as _, ptr, len)
+}
+
+unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
+ // Check that the ref_cnt is 1 (unique).
+ //
+ // If it is unique, then it is set to 0 with AcqRel fence for the same
+ // reason in release_shared.
+ //
+ // Otherwise, we take the other branch and call release_shared.
+ if (*shared)
+ .ref_cnt
+ .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
+ .is_ok()
+ {
+ let buf = (*shared).buf;
+ let cap = (*shared).cap;
+
+ // Deallocate Shared
+ drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>));
+
+ // Copy back buffer
+ ptr::copy(ptr, buf, len);
+
+ Vec::from_raw_parts(buf, len, cap)
+ } else {
+ let v = slice::from_raw_parts(ptr, len).to_vec();
+ release_shared(shared);
+ v
+ }
+}
+
+unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
+}
+
+pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
+ let shared = data.load(Ordering::Acquire);
+ let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
+ ref_cnt == 1
+}
+
+unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
+ data.with_mut(|shared| {
+ release_shared(shared.cast());
+ });
+}
+
+unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
+ let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);
+
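+    // Guard against reference-count overflow (the same policy as
+    // `std::sync::Arc`): abort rather than risk a use-after-free through a
+    // wrapped counter.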
+ if old_size > usize::MAX >> 1 {
+ crate::abort();
+ }
+
+ Bytes {
+ ptr,
+ len,
+ data: AtomicPtr::new(shared as _),
+ vtable: &SHARED_VTABLE,
+ }
+}
+
+#[cold]
+unsafe fn shallow_clone_vec(
+ atom: &AtomicPtr<()>,
+ ptr: *const (),
+ buf: *mut u8,
+ offset: *const u8,
+ len: usize,
+) -> Bytes {
+    // If the buffer is still tracked in a `Vec<u8>`, it is time to
+    // promote the vec to an `Arc`. This could potentially be called
+    // concurrently, so some care must be taken.
+
+ // First, allocate a new `Shared` instance containing the
+ // `Vec` fields. It's important to note that `ptr`, `len`,
+ // and `cap` cannot be mutated without having `&mut self`.
+ // This means that these fields will not be concurrently
+ // updated and since the buffer hasn't been promoted to an
+ // `Arc`, those three fields still are the components of the
+ // vector.
+ let shared = Box::new(Shared {
+ buf,
+ cap: (offset as usize - buf as usize) + len,
+ // Initialize refcount to 2. One for this reference, and one
+ // for the new clone that will be returned from
+ // `shallow_clone`.
+ ref_cnt: AtomicUsize::new(2),
+ });
+
+ let shared = Box::into_raw(shared);
+
+ // The pointer should be aligned, so this assert should
+ // always succeed.
+ debug_assert!(
+ 0 == (shared as usize & KIND_MASK),
+ "internal: Box<Shared> should have an aligned pointer",
+ );
+
+    // Try compare & swapping the pointer into the `arc` field.
+    // `Release` is used to synchronize with other threads that
+    // will load the `arc` field.
+ //
+ // If the `compare_exchange` fails, then the thread lost the
+ // race to promote the buffer to shared. The `Acquire`
+ // ordering will synchronize with the `compare_exchange`
+ // that happened in the other thread and the `Shared`
+ // pointed to by `actual` will be visible.
+ match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
+ Ok(actual) => {
+ debug_assert!(actual as usize == ptr as usize);
+ // The upgrade was successful, the new handle can be
+ // returned.
+ Bytes {
+ ptr: offset,
+ len,
+ data: AtomicPtr::new(shared as _),
+ vtable: &SHARED_VTABLE,
+ }
+ }
+ Err(actual) => {
+ // The upgrade failed, a concurrent clone happened. Release
+ // the allocation that was made in this thread, it will not
+ // be needed.
+ let shared = Box::from_raw(shared);
+ mem::forget(*shared);
+
+ // Buffer already promoted to shared storage, so increment ref
+ // count.
+ shallow_clone_arc(actual as _, offset, len)
+ }
+ }
+}
+
+unsafe fn release_shared(ptr: *mut Shared) {
+ // `Shared` storage... follow the drop steps from Arc.
+ if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
+ return;
+ }
+
+ // This fence is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` fence. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this fence, which happens before the
+ // deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously happened before), and an
+ // > "acquire" operation before deleting the object.
+ //
+    // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ //
+ // Thread sanitizer does not support atomic fences. Use an atomic load
+ // instead.
+ (*ptr).ref_cnt.load(Ordering::Acquire);
+
+ // Drop the data
+ drop(Box::from_raw(ptr));
+}
+
+// Ideally we would always use this version of `ptr_map` since it is strict
+// provenance compatible, but it results in worse codegen. We will however still
+// use it on miri because it gives better diagnostics for people who test bytes
+// code with miri.
+//
+// See https://github.com/tokio-rs/bytes/pull/545 for more info.
+#[cfg(miri)]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+ F: FnOnce(usize) -> usize,
+{
+ let old_addr = ptr as usize;
+ let new_addr = f(old_addr);
+ let diff = new_addr.wrapping_sub(old_addr);
+ ptr.wrapping_add(diff)
+}
+
+#[cfg(not(miri))]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+ F: FnOnce(usize) -> usize,
+{
+ let old_addr = ptr as usize;
+ let new_addr = f(old_addr);
+ new_addr as *mut u8
+}
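+
+// Usage sketch: tagging an even pointer with KIND_VEC and then stripping the
+// tag round-trips the address, since `!KIND_MASK` clears exactly the tag bit:
+//
+//     let tagged   = ptr_map(ptr, |addr| addr | KIND_VEC);
+//     let restored = ptr_map(tagged, |addr| addr & !KIND_MASK);
+//     debug_assert_eq!(restored, ptr);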
+
+// compile-fails
+
+/// ```compile_fail
+/// use bytes::Bytes;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = Bytes::from("hello world");
+/// b1.split_to(6);
+/// }
+/// ```
+fn _split_to_must_use() {}
+
+/// ```compile_fail
+/// use bytes::Bytes;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = Bytes::from("hello world");
+/// b1.split_off(6);
+/// }
+/// ```
+fn _split_off_must_use() {}
+
+// fuzz tests
+#[cfg(all(test, loom))]
+mod fuzz {
+ use loom::sync::Arc;
+ use loom::thread;
+
+ use super::Bytes;
+ #[test]
+ fn bytes_cloning_vec() {
+ loom::model(|| {
+ let a = Bytes::from(b"abcdefgh".to_vec());
+ let addr = a.as_ptr() as usize;
+
+ // test the Bytes::clone is Sync by putting it in an Arc
+ let a1 = Arc::new(a);
+ let a2 = a1.clone();
+
+ let t1 = thread::spawn(move || {
+ let b: Bytes = (*a1).clone();
+ assert_eq!(b.as_ptr() as usize, addr);
+ });
+
+ let t2 = thread::spawn(move || {
+ let b: Bytes = (*a2).clone();
+ assert_eq!(b.as_ptr() as usize, addr);
+ });
+
+ t1.join().unwrap();
+ t2.join().unwrap();
+ });
+ }
+}
+
+use core::iter::{FromIterator, Iterator};
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{Deref, DerefMut};
+use core::ptr::{self, NonNull};
+use core::{cmp, fmt, hash, isize, slice, usize};
+
+use alloc::{
+ borrow::{Borrow, BorrowMut},
+ boxed::Box,
+ string::String,
+ vec,
+ vec::Vec,
+};
+
+use crate::buf::{IntoIter, UninitSlice};
+use crate::bytes::Vtable;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+use crate::{Buf, BufMut, Bytes};
+
+/// A unique reference to a contiguous slice of memory.
+///
+/// `BytesMut` represents a unique view into a potentially shared memory region.
+/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
+/// mutate the memory.
+///
+/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
+/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
+/// same `buf` overlaps with its slice. That guarantee means that a write lock
+/// is not required.
+///
+/// # Growth
+///
+/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
+/// necessary. However, explicitly reserving the required space up-front before
+/// a series of inserts will be more efficient.
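+///
+/// For example (an illustrative doctest, not part of the original docs):
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::new();
+/// // One up-front reservation instead of several implicit grows.
+/// buf.reserve(3 * 4);
+/// for i in 0u32..3 {
+///     buf.put_u32(i);
+/// }
+/// assert_eq!(buf.len(), 12);
+/// ```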
+///
+/// # Examples
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::with_capacity(64);
+///
+/// buf.put_u8(b'h');
+/// buf.put_u8(b'e');
+/// buf.put(&b"llo"[..]);
+///
+/// assert_eq!(&buf[..], b"hello");
+///
+/// // Freeze the buffer so that it can be shared
+/// let a = buf.freeze();
+///
+/// // This does not allocate, instead `b` points to the same memory.
+/// let b = a.clone();
+///
+/// assert_eq!(&a[..], b"hello");
+/// assert_eq!(&b[..], b"hello");
+/// ```
+pub struct BytesMut {
+ ptr: NonNull<u8>,
+ len: usize,
+ cap: usize,
+ data: *mut Shared,
+}
+
+// Thread-safe reference-counted container for the shared storage. This is
+// mostly the same as `core::sync::Arc` but without the weak counter. The
+// ref-counting fns are based on the ones found in `std`.
+//
+// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
+// up making the overall code simpler and easier to reason about. This is due to
+// some of the logic around setting `Inner::arc` and other ways the `arc` field
+// is used. Using `Arc` ended up requiring a number of funky transmutes and
+// other shenanigans to make it work.
+struct Shared {
+ vec: Vec<u8>,
+ original_capacity_repr: usize,
+ ref_count: AtomicUsize,
+}
+
+// Assert that the alignment of `Shared` is divisible by 2.
+// This is a necessary invariant, since we depend on the pointer of an
+// allocated `Shared` object to implicitly carry the `KIND_ARC` flag:
+// the flag is set when the LSB is 0.
+const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
+
+// Buffer storage strategy flags.
+const KIND_ARC: usize = 0b0;
+const KIND_VEC: usize = 0b1;
+const KIND_MASK: usize = 0b1;
+
+// The max original capacity value. Any `Bytes` allocated with a greater initial
+// capacity will default to this.
+const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
+// The original capacity algorithm will not take effect unless the originally
+// allocated capacity was at least 1kb in size.
+const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
+// The original capacity is stored in powers of 2 starting at 1kb to a max of
+// 64kb. Representing it as such requires only 3 bits of storage.
+const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
+const ORIGINAL_CAPACITY_OFFSET: usize = 2;
+
+const VEC_POS_OFFSET: usize = 5;
+// When the storage is in the `Vec` representation, the pointer can be advanced
+// at most this value. This is due to the amount of storage available to track
+// the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY
+// bits.
+const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
+const NOT_VEC_POS_MASK: usize = 0b11111;
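+
+// Illustrative layout of the packed `data` word in the KIND_VEC
+// representation (a sketch derived from the constants above):
+//
+//     let vec_pos       = data >> VEC_POS_OFFSET;      // offset into the Vec
+//     let orig_cap_repr = (data & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
+//     let kind          = data & KIND_MASK;            // KIND_VEC == 1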
+
+#[cfg(target_pointer_width = "64")]
+const PTR_WIDTH: usize = 64;
+#[cfg(target_pointer_width = "32")]
+const PTR_WIDTH: usize = 32;
+
+/*
+ *
+ * ===== BytesMut =====
+ *
+ */
+
+impl BytesMut {
+ /// Creates a new `BytesMut` with the specified capacity.
+ ///
+ /// The returned `BytesMut` will be able to hold at least `capacity` bytes
+ /// without reallocating.
+ ///
+ /// It is important to note that this function does not specify the length
+ /// of the returned `BytesMut`, but only the capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut bytes = BytesMut::with_capacity(64);
+ ///
+ /// // `bytes` contains no data, even though there is capacity
+ /// assert_eq!(bytes.len(), 0);
+ ///
+ /// bytes.put(&b"hello world"[..]);
+ ///
+ /// assert_eq!(&bytes[..], b"hello world");
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> BytesMut {
+ BytesMut::from_vec(Vec::with_capacity(capacity))
+ }
+
+ /// Creates a new `BytesMut` with default capacity.
+ ///
+    /// The resulting object has a length of 0 and an unspecified capacity.
+ /// This function does not allocate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut bytes = BytesMut::new();
+ ///
+ /// assert_eq!(0, bytes.len());
+ ///
+ /// bytes.reserve(2);
+ /// bytes.put_slice(b"xy");
+ ///
+ /// assert_eq!(&b"xy"[..], &bytes[..]);
+ /// ```
+ #[inline]
+ pub fn new() -> BytesMut {
+ BytesMut::with_capacity(0)
+ }
+
+ /// Returns the number of bytes contained in this `BytesMut`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let b = BytesMut::from(&b"hello"[..]);
+ /// assert_eq!(b.len(), 5);
+ /// ```
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns true if the `BytesMut` has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let b = BytesMut::with_capacity(64);
+ /// assert!(b.is_empty());
+ /// ```
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len == 0
+ }
+
+ /// Returns the number of bytes the `BytesMut` can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let b = BytesMut::with_capacity(64);
+ /// assert_eq!(b.capacity(), 64);
+ /// ```
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.cap
+ }
+
+ /// Converts `self` into an immutable `Bytes`.
+ ///
+ /// The conversion is zero cost and is used to indicate that the slice
+ /// referenced by the handle will no longer be mutated. Once the conversion
+ /// is done, the handle can be cloned and shared across threads.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ /// use std::thread;
+ ///
+ /// let mut b = BytesMut::with_capacity(64);
+ /// b.put(&b"hello world"[..]);
+ /// let b1 = b.freeze();
+ /// let b2 = b1.clone();
+ ///
+ /// let th = thread::spawn(move || {
+ /// assert_eq!(&b1[..], b"hello world");
+ /// });
+ ///
+ /// assert_eq!(&b2[..], b"hello world");
+ /// th.join().unwrap();
+ /// ```
+ #[inline]
+ pub fn freeze(self) -> Bytes {
+ if self.kind() == KIND_VEC {
+ // Just re-use `Bytes` internal Vec vtable
+ unsafe {
+ let off = self.get_vec_pos();
+ let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
+ mem::forget(self);
+ let mut b: Bytes = vec.into();
+ b.advance(off);
+ b
+ }
+ } else {
+ debug_assert_eq!(self.kind(), KIND_ARC);
+
+ let ptr = self.ptr.as_ptr();
+ let len = self.len;
+ let data = AtomicPtr::new(self.data.cast());
+ mem::forget(self);
+ unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
+ }
+ }
+
+    /// Creates a new `BytesMut` of length `len`, with every byte initialized to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let zeros = BytesMut::zeroed(42);
+ ///
+ /// assert_eq!(zeros.len(), 42);
+ /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
+ /// ```
+ pub fn zeroed(len: usize) -> BytesMut {
+ BytesMut::from_vec(vec![0; len])
+ }
+
+ /// Splits the bytes into two at the given index.
+ ///
+ /// Afterwards `self` contains elements `[0, at)`, and the returned
+ /// `BytesMut` contains elements `[at, capacity)`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count
+ /// and sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut a = BytesMut::from(&b"hello world"[..]);
+ /// let mut b = a.split_off(5);
+ ///
+ /// a[0] = b'j';
+ /// b[0] = b'!';
+ ///
+ /// assert_eq!(&a[..], b"jello");
+ /// assert_eq!(&b[..], b"!world");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > capacity`.
+ #[must_use = "consider BytesMut::truncate if you don't need the other half"]
+ pub fn split_off(&mut self, at: usize) -> BytesMut {
+ assert!(
+ at <= self.capacity(),
+ "split_off out of bounds: {:?} <= {:?}",
+ at,
+ self.capacity(),
+ );
+ unsafe {
+ let mut other = self.shallow_clone();
+ other.set_start(at);
+ self.set_end(at);
+ other
+ }
+ }
+
+ /// Removes the bytes from the current view, returning them in a new
+ /// `BytesMut` handle.
+ ///
+ /// Afterwards, `self` will be empty, but will retain any additional
+ /// capacity that it had before the operation. This is identical to
+ /// `self.split_to(self.len())`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count and
+ /// sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut buf = BytesMut::with_capacity(1024);
+ /// buf.put(&b"hello world"[..]);
+ ///
+ /// let other = buf.split();
+ ///
+ /// assert!(buf.is_empty());
+ /// assert_eq!(1013, buf.capacity());
+ ///
+ /// assert_eq!(other, b"hello world"[..]);
+ /// ```
+ #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
+ pub fn split(&mut self) -> BytesMut {
+ let len = self.len();
+ self.split_to(len)
+ }
+
+ /// Splits the buffer into two at the given index.
+ ///
+ /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
+ /// contains elements `[0, at)`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count and
+ /// sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut a = BytesMut::from(&b"hello world"[..]);
+ /// let mut b = a.split_to(5);
+ ///
+ /// a[0] = b'!';
+ /// b[0] = b'j';
+ ///
+ /// assert_eq!(&a[..], b"!world");
+ /// assert_eq!(&b[..], b"jello");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ #[must_use = "consider BytesMut::advance if you don't need the other half"]
+ pub fn split_to(&mut self, at: usize) -> BytesMut {
+ assert!(
+ at <= self.len(),
+ "split_to out of bounds: {:?} <= {:?}",
+ at,
+ self.len(),
+ );
+
+ unsafe {
+ let mut other = self.shallow_clone();
+ other.set_end(at);
+ self.set_start(at);
+ other
+ }
+ }
+
+ /// Shortens the buffer, keeping the first `len` bytes and dropping the
+ /// rest.
+ ///
+ /// If `len` is greater than the buffer's current length, this has no
+ /// effect.
+ ///
+ /// Existing underlying capacity is preserved.
+ ///
+ /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
+ /// excess bytes to be returned instead of dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::from(&b"hello world"[..]);
+ /// buf.truncate(5);
+ /// assert_eq!(buf, b"hello"[..]);
+ /// ```
+ pub fn truncate(&mut self, len: usize) {
+ if len <= self.len() {
+ unsafe {
+ self.set_len(len);
+ }
+ }
+ }
+
+ /// Clears the buffer, removing all data. Existing capacity is preserved.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::from(&b"hello world"[..]);
+ /// buf.clear();
+ /// assert!(buf.is_empty());
+ /// ```
+ pub fn clear(&mut self) {
+ self.truncate(0);
+ }
+
+ /// Resizes the buffer so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the buffer is extended by the
+ /// difference with each additional byte set to `value`. If `new_len` is
+ /// less than `len`, the buffer is simply truncated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::new();
+ ///
+ /// buf.resize(3, 0x1);
+ /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
+ ///
+ /// buf.resize(2, 0x2);
+ /// assert_eq!(&buf[..], &[0x1, 0x1]);
+ ///
+ /// buf.resize(4, 0x3);
+ /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
+ /// ```
+ pub fn resize(&mut self, new_len: usize, value: u8) {
+ let len = self.len();
+ if new_len > len {
+ let additional = new_len - len;
+ self.reserve(additional);
+ unsafe {
+ let dst = self.chunk_mut().as_mut_ptr();
+ ptr::write_bytes(dst, value, additional);
+ self.set_len(new_len);
+ }
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Sets the length of the buffer.
+ ///
+ /// This will explicitly set the size of the buffer without actually
+ /// modifying the data, so it is up to the caller to ensure that the data
+ /// has been initialized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut b = BytesMut::from(&b"hello world"[..]);
+ ///
+ /// unsafe {
+ /// b.set_len(5);
+ /// }
+ ///
+ /// assert_eq!(&b[..], b"hello");
+ ///
+ /// unsafe {
+ /// b.set_len(11);
+ /// }
+ ///
+ /// assert_eq!(&b[..], b"hello world");
+ /// ```
+ #[inline]
+ pub unsafe fn set_len(&mut self, len: usize) {
+ debug_assert!(len <= self.cap, "set_len out of bounds");
+ self.len = len;
+ }
+
+ /// Reserves capacity for at least `additional` more bytes to be inserted
+ /// into the given `BytesMut`.
+ ///
+ /// More than `additional` bytes may be reserved in order to avoid frequent
+ /// reallocations. A call to `reserve` may result in an allocation.
+ ///
+ /// Before allocating new buffer space, the function will attempt to reclaim
+ /// space in the existing buffer. If the current handle references a view
+ /// into a larger original buffer, and all other handles referencing part
+ /// of the same original buffer have been dropped, then the current view
+ /// can be copied/shifted to the front of the buffer and the handle can take
+ /// ownership of the full buffer, provided that the full buffer is large
+ /// enough to fit the requested additional capacity.
+ ///
+ /// This optimization will only happen if shifting the data from the current
+ /// view to the front of the buffer is not too expensive in terms of the
+ /// (amortized) time required. The precise condition is subject to change;
+    /// as of now, the distance that the data is shifted by needs to be at least
+    /// as large as the length of the data being shifted. If the current view is empty
+ /// and the original buffer is large enough to fit the requested additional
+ /// capacity, then reallocations will never happen.
+ ///
+ /// # Examples
+ ///
+ /// In the following example, a new buffer is allocated.
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::from(&b"hello"[..]);
+ /// buf.reserve(64);
+ /// assert!(buf.capacity() >= 69);
+ /// ```
+ ///
+ /// In the following example, the existing buffer is reclaimed.
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut buf = BytesMut::with_capacity(128);
+ /// buf.put(&[0; 64][..]);
+ ///
+ /// let ptr = buf.as_ptr();
+ /// let other = buf.split();
+ ///
+ /// assert!(buf.is_empty());
+ /// assert_eq!(buf.capacity(), 64);
+ ///
+ /// drop(other);
+ /// buf.reserve(128);
+ ///
+ /// assert_eq!(buf.capacity(), 128);
+ /// assert_eq!(buf.as_ptr(), ptr);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ let len = self.len();
+ let rem = self.capacity() - len;
+
+ if additional <= rem {
+ // The handle can already store at least `additional` more bytes, so
+ // there is no further work needed to be done.
+ return;
+ }
+
+ self.reserve_inner(additional);
+ }
+
+    // In a separate function to allow the short-circuits in `reserve` to
+    // be inlined. This significantly helps performance.
+ fn reserve_inner(&mut self, additional: usize) {
+ let len = self.len();
+ let kind = self.kind();
+
+ if kind == KIND_VEC {
+ // If there's enough free space before the start of the buffer, then
+ // just copy the data backwards and reuse the already-allocated
+ // space.
+ //
+ // Otherwise, since backed by a vector, use `Vec::reserve`
+ //
+ // We need to make sure that this optimization does not kill the
+ // amortized runtimes of BytesMut's operations.
+ unsafe {
+ let off = self.get_vec_pos();
+
+ // Only reuse space if we can satisfy the requested additional space.
+ //
+ // Also check if the value of `off` suggests that enough bytes
+ // have been read to account for the overhead of shifting all
+ // the data (in an amortized analysis).
+ // Hence the condition `off >= self.len()`.
+ //
+ // This condition also already implies that the buffer is going
+ // to be (at least) half-empty in the end; so we do not break
+ // the (amortized) runtime with future resizes of the underlying
+ // `Vec`.
+ //
+ // [For more details check issue #524, and PR #525.]
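+                //
+                // Worked example (illustrative): with `off == 6`,
+                // `self.len() == 4` and `self.capacity() == 10`, a request
+                // for `additional == 8` passes both checks
+                // (`10 - 4 + 6 == 12 >= 8` and `6 >= 4`), so the 4 live
+                // bytes are copied back to the start and `cap` grows by
+                // `off` to 16.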
+ if self.capacity() - self.len() + off >= additional && off >= self.len() {
+ // There's enough space, and it's not too much overhead:
+ // reuse the space!
+ //
+ // Just move the pointer back to the start after copying
+ // data back.
+ let base_ptr = self.ptr.as_ptr().sub(off);
+ // Since `off >= self.len()`, the two regions don't overlap.
+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
+ self.ptr = vptr(base_ptr);
+ self.set_vec_pos(0);
+
+ // Length stays constant, but since we moved backwards we
+ // can gain capacity back.
+ self.cap += off;
+ } else {
+ // Not enough space, or reusing might be too much overhead:
+ // allocate more space!
+ let mut v =
+ ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+ v.reserve(additional);
+
+ // Update the info
+ self.ptr = vptr(v.as_mut_ptr().add(off));
+ self.len = v.len() - off;
+ self.cap = v.capacity() - off;
+ }
+
+ return;
+ }
+ }
+
+ debug_assert_eq!(kind, KIND_ARC);
+ let shared: *mut Shared = self.data;
+
+ // Reserving involves abandoning the currently shared buffer and
+ // allocating a new vector with the requested capacity.
+ //
+ // Compute the new capacity
+ let mut new_cap = len.checked_add(additional).expect("overflow");
+
+ unsafe {
+ // First, try to reclaim the buffer. This is possible if the current
+ // handle is the only outstanding handle pointing to the buffer.
+ if (*shared).is_unique() {
+ // This is the only handle to the buffer. It can be reclaimed.
+ // However, before doing the work of copying data, check to make
+ // sure that the vector has enough capacity.
+ let v = &mut (*shared).vec;
+
+ let v_capacity = v.capacity();
+ let ptr = v.as_mut_ptr();
+
+ let offset = offset_from(self.ptr.as_ptr(), ptr);
+
+ // Compare the condition in the `kind == KIND_VEC` case above
+ // for more details.
+ if v_capacity >= new_cap + offset {
+ self.cap = new_cap;
+ // no copy is necessary
+ } else if v_capacity >= new_cap && offset >= len {
+ // The capacity is sufficient, and copying is not too much
+ // overhead: reclaim the buffer!
+
+ // `offset >= len` means: no overlap
+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
+
+ self.ptr = vptr(ptr);
+ self.cap = v.capacity();
+ } else {
+ // calculate offset
+ let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
+
+ // new_cap is calculated in terms of `BytesMut`, not the underlying
+ // `Vec`, so it does not take the offset into account.
+ //
+ // Thus we have to manually add it here.
+ new_cap = new_cap.checked_add(off).expect("overflow");
+
+ // The vector capacity is not sufficient. The reserve request is
+ // asking for more than the initial buffer capacity. Allocate more
+ // than requested if `new_cap` is not much bigger than the current
+ // capacity.
+ //
+                    // There are some situations (e.g. when `reserve_exact` was
+                    // used) in which the buffer capacity could be below
+                    // `original_capacity`, so do a check.
+ let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+
+ new_cap = cmp::max(double, new_cap);
+
+ // No space - allocate more
+ //
+ // The length field of `Shared::vec` is not used by the `BytesMut`;
+ // instead we use the `len` field in the `BytesMut` itself. However,
+ // when calling `reserve`, it doesn't guarantee that data stored in
+ // the unused capacity of the vector is copied over to the new
+ // allocation, so we need to ensure that we don't have any data we
+ // care about in the unused capacity before calling `reserve`.
+ debug_assert!(off + len <= v.capacity());
+ v.set_len(off + len);
+ v.reserve(new_cap - v.len());
+
+ // Update the info
+ self.ptr = vptr(v.as_mut_ptr().add(off));
+ self.cap = v.capacity() - off;
+ }
+
+ return;
+ }
+ }
+
+ let original_capacity_repr = unsafe { (*shared).original_capacity_repr };
+ let original_capacity = original_capacity_from_repr(original_capacity_repr);
+
+ new_cap = cmp::max(new_cap, original_capacity);
+
+ // Create a new vector to store the data
+ let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
+
+ // Copy the bytes
+ v.extend_from_slice(self.as_ref());
+
+ // Release the shared handle. This must be done *after* the bytes are
+ // copied.
+ unsafe { release_shared(shared) };
+
+ // Update self
+ let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+ self.data = invalid_ptr(data);
+ self.ptr = vptr(v.as_mut_ptr());
+ self.len = v.len();
+ self.cap = v.capacity();
+ }
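+
+    // A hedged illustration (not part of the original source) of the fast
+    // path above: once enough bytes have been consumed from the front
+    // (`off >= len`), `reserve` shifts the remaining data back and reuses
+    // the existing allocation instead of reallocating.
+    //
+    //     use bytes::{Buf, BufMut, BytesMut};
+    //
+    //     let mut buf = BytesMut::with_capacity(64);
+    //     buf.put_bytes(0, 48);
+    //     buf.advance(40);  // off = 40 >= len = 8
+    //     buf.reserve(50);  // reclaims the 40 freed bytes; no reallocation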
+
+    /// Appends the given bytes to this `BytesMut`.
+ ///
+ /// If this `BytesMut` object does not have enough capacity, it is resized
+ /// first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::with_capacity(0);
+ /// buf.extend_from_slice(b"aaabbb");
+ /// buf.extend_from_slice(b"cccddd");
+ ///
+ /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+ /// ```
+ #[inline]
+ pub fn extend_from_slice(&mut self, extend: &[u8]) {
+ let cnt = extend.len();
+ self.reserve(cnt);
+
+ unsafe {
+ let dst = self.spare_capacity_mut();
+ // Reserved above
+ debug_assert!(dst.len() >= cnt);
+
+ ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
+ }
+
+ unsafe {
+ self.advance_mut(cnt);
+ }
+ }
+
+ /// Absorbs a `BytesMut` that was previously split off.
+ ///
+ /// If the two `BytesMut` objects were previously contiguous and not mutated
+    /// in a way that causes re-allocation, i.e., if `other` was created by
+ /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
+ /// that just decreases a reference count and sets a few indices.
+ /// Otherwise this method degenerates to
+ /// `self.extend_from_slice(other.as_ref())`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::with_capacity(64);
+ /// buf.extend_from_slice(b"aaabbbcccddd");
+ ///
+ /// let split = buf.split_off(6);
+ /// assert_eq!(b"aaabbb", &buf[..]);
+ /// assert_eq!(b"cccddd", &split[..]);
+ ///
+ /// buf.unsplit(split);
+ /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+ /// ```
+ pub fn unsplit(&mut self, other: BytesMut) {
+ if self.is_empty() {
+ *self = other;
+ return;
+ }
+
+ if let Err(other) = self.try_unsplit(other) {
+ self.extend_from_slice(other.as_ref());
+ }
+ }
+
+ // private
+
+ // For now, use a `Vec` to manage the memory for us, but we may want to
+ // change that in the future to some alternate allocator strategy.
+ //
+ // Thus, we don't expose an easy way to construct from a `Vec` since an
+ // internal change could make a simple pattern (`BytesMut::from(vec)`)
+ // suddenly a lot more expensive.
+ #[inline]
+ pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
+ let ptr = vptr(vec.as_mut_ptr());
+ let len = vec.len();
+ let cap = vec.capacity();
+ mem::forget(vec);
+
+ let original_capacity_repr = original_capacity_to_repr(cap);
+ let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+
+ BytesMut {
+ ptr,
+ len,
+ cap,
+ data: invalid_ptr(data),
+ }
+ }
+
+ #[inline]
+ fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
+ }
+
+ #[inline]
+ fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
+ }
+
+ unsafe fn set_start(&mut self, start: usize) {
+ // Setting the start to 0 is a no-op, so return early if this is the
+ // case.
+ if start == 0 {
+ return;
+ }
+
+ debug_assert!(start <= self.cap, "internal: set_start out of bounds");
+
+ let kind = self.kind();
+
+ if kind == KIND_VEC {
+ // Setting the start when in vec representation is a little more
+ // complicated. First, we have to track how far ahead the
+ // "start" of the byte buffer from the beginning of the vec. We
+ // also have to ensure that we don't exceed the maximum shift.
+ let pos = self.get_vec_pos() + start;
+
+ if pos <= MAX_VEC_POS {
+ self.set_vec_pos(pos);
+ } else {
+ // The repr must be upgraded to ARC. This will never happen
+ // on 64 bit systems and will only happen on 32 bit systems
+ // when shifting past 134,217,727 bytes. As such, we don't
+ // worry too much about performance here.
+ self.promote_to_shared(/*ref_count = */ 1);
+ }
+ }
+
+ // Updating the start of the view is setting `ptr` to point to the
+ // new start and updating the `len` field to reflect the new length
+ // of the view.
+ self.ptr = vptr(self.ptr.as_ptr().add(start));
+
+ if self.len >= start {
+ self.len -= start;
+ } else {
+ self.len = 0;
+ }
+
+ self.cap -= start;
+ }
+
+ unsafe fn set_end(&mut self, end: usize) {
+ debug_assert_eq!(self.kind(), KIND_ARC);
+ assert!(end <= self.cap, "set_end out of bounds");
+
+ self.cap = end;
+ self.len = cmp::min(self.len, end);
+ }
+
+ fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
+ if other.capacity() == 0 {
+ return Ok(());
+ }
+
+ let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
+ if ptr == other.ptr.as_ptr()
+ && self.kind() == KIND_ARC
+ && other.kind() == KIND_ARC
+ && self.data == other.data
+ {
+ // Contiguous blocks, just combine directly
+ self.len += other.len;
+ self.cap += other.cap;
+ Ok(())
+ } else {
+ Err(other)
+ }
+ }
+
+ #[inline]
+ fn kind(&self) -> usize {
+ self.data as usize & KIND_MASK
+ }
+
+ unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
+ debug_assert_eq!(self.kind(), KIND_VEC);
+ debug_assert!(ref_cnt == 1 || ref_cnt == 2);
+
+ let original_capacity_repr =
+ (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
+
+ // The vec offset cannot be concurrently mutated, so there
+ // should be no danger reading it.
+ let off = (self.data as usize) >> VEC_POS_OFFSET;
+
+ // First, allocate a new `Shared` instance containing the
+ // `Vec` fields. It's important to note that `ptr`, `len`,
+ // and `cap` cannot be mutated without having `&mut self`.
+ // This means that these fields will not be concurrently
+ // updated and since the buffer hasn't been promoted to an
+ // `Arc`, those three fields still are the components of the
+ // vector.
+ let shared = Box::new(Shared {
+ vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
+ original_capacity_repr,
+ ref_count: AtomicUsize::new(ref_cnt),
+ });
+
+ let shared = Box::into_raw(shared);
+
+ // The pointer should be aligned, so this assert should
+ // always succeed.
+ debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
+
+ self.data = shared;
+ }
+
+ /// Makes an exact shallow clone of `self`.
+ ///
+    /// The kind of `self` doesn't matter, but this is unsafe
+    /// because the clone will have the same offsets. You must
+    /// ensure that the value returned to the user does not allow
+    /// two views into the same range.
+ #[inline]
+ unsafe fn shallow_clone(&mut self) -> BytesMut {
+ if self.kind() == KIND_ARC {
+ increment_shared(self.data);
+ ptr::read(self)
+ } else {
+ self.promote_to_shared(/*ref_count = */ 2);
+ ptr::read(self)
+ }
+ }
+
+ #[inline]
+ unsafe fn get_vec_pos(&self) -> usize {
+ debug_assert_eq!(self.kind(), KIND_VEC);
+
+ self.data as usize >> VEC_POS_OFFSET
+ }
+
+ #[inline]
+ unsafe fn set_vec_pos(&mut self, pos: usize) {
+ debug_assert_eq!(self.kind(), KIND_VEC);
+ debug_assert!(pos <= MAX_VEC_POS);
+
+ self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (self.data as usize & NOT_VEC_POS_MASK));
+ }
+
+ /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
+ ///
+ /// The returned slice can be used to fill the buffer with data (e.g. by
+ /// reading from a file) before marking the data as initialized using the
+ /// [`set_len`] method.
+ ///
+ /// [`set_len`]: BytesMut::set_len
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// // Allocate buffer big enough for 10 bytes.
+ /// let mut buf = BytesMut::with_capacity(10);
+ ///
+ /// // Fill in the first 3 elements.
+ /// let uninit = buf.spare_capacity_mut();
+ /// uninit[0].write(0);
+ /// uninit[1].write(1);
+ /// uninit[2].write(2);
+ ///
+ /// // Mark the first 3 bytes of the buffer as being initialized.
+ /// unsafe {
+ /// buf.set_len(3);
+ /// }
+ ///
+ /// assert_eq!(&buf[..], &[0, 1, 2]);
+ /// ```
+ #[inline]
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ unsafe {
+ let ptr = self.ptr.as_ptr().add(self.len);
+ let len = self.cap - self.len;
+
+ slice::from_raw_parts_mut(ptr.cast(), len)
+ }
+ }
+}
+
+impl Drop for BytesMut {
+ fn drop(&mut self) {
+ let kind = self.kind();
+
+ if kind == KIND_VEC {
+ unsafe {
+ let off = self.get_vec_pos();
+
+ // Vector storage, free the vector
+ let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
+ }
+ } else if kind == KIND_ARC {
+ unsafe { release_shared(self.data) };
+ }
+ }
+}
+
+impl Buf for BytesMut {
+ #[inline]
+ fn remaining(&self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ self.as_slice()
+ }
+
+ #[inline]
+ fn advance(&mut self, cnt: usize) {
+ assert!(
+ cnt <= self.remaining(),
+ "cannot advance past `remaining`: {:?} <= {:?}",
+ cnt,
+ self.remaining(),
+ );
+ unsafe {
+ self.set_start(cnt);
+ }
+ }
+
+ fn copy_to_bytes(&mut self, len: usize) -> Bytes {
+ self.split_to(len).freeze()
+ }
+}
+
+unsafe impl BufMut for BytesMut {
+ #[inline]
+ fn remaining_mut(&self) -> usize {
+ usize::MAX - self.len()
+ }
+
+ #[inline]
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ let remaining = self.cap - self.len();
+ if cnt > remaining {
+ super::panic_advance(cnt, remaining);
+ }
+ // Addition won't overflow since it is at most `self.cap`.
+ self.len = self.len() + cnt;
+ }
+
+ #[inline]
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ if self.capacity() == self.len() {
+ self.reserve(64);
+ }
+ self.spare_capacity_mut().into()
+ }
+
+ // Specialize these methods so they can skip checking `remaining_mut`
+ // and `advance_mut`.
+
+ fn put<T: Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
+ while src.has_remaining() {
+ let s = src.chunk();
+ let l = s.len();
+ self.extend_from_slice(s);
+ src.advance(l);
+ }
+ }
+
+ fn put_slice(&mut self, src: &[u8]) {
+ self.extend_from_slice(src);
+ }
+
+ fn put_bytes(&mut self, val: u8, cnt: usize) {
+ self.reserve(cnt);
+ unsafe {
+ let dst = self.spare_capacity_mut();
+ // Reserved above
+ debug_assert!(dst.len() >= cnt);
+
+ ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
+
+ self.advance_mut(cnt);
+ }
+ }
+}
+
+impl AsRef<[u8]> for BytesMut {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.as_slice()
+ }
+}
+
+impl Deref for BytesMut {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ self.as_ref()
+ }
+}
+
+impl AsMut<[u8]> for BytesMut {
+ #[inline]
+ fn as_mut(&mut self) -> &mut [u8] {
+ self.as_slice_mut()
+ }
+}
+
+impl DerefMut for BytesMut {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [u8] {
+ self.as_mut()
+ }
+}
+
+impl<'a> From<&'a [u8]> for BytesMut {
+ fn from(src: &'a [u8]) -> BytesMut {
+ BytesMut::from_vec(src.to_vec())
+ }
+}
+
+impl<'a> From<&'a str> for BytesMut {
+ fn from(src: &'a str) -> BytesMut {
+ BytesMut::from(src.as_bytes())
+ }
+}
+
+impl From<BytesMut> for Bytes {
+ fn from(src: BytesMut) -> Bytes {
+ src.freeze()
+ }
+}
+
+impl PartialEq for BytesMut {
+ fn eq(&self, other: &BytesMut) -> bool {
+ self.as_slice() == other.as_slice()
+ }
+}
+
+impl PartialOrd for BytesMut {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ self.as_slice().partial_cmp(other.as_slice())
+ }
+}
+
+impl Ord for BytesMut {
+ fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
+ self.as_slice().cmp(other.as_slice())
+ }
+}
+
+impl Eq for BytesMut {}
+
+impl Default for BytesMut {
+ #[inline]
+ fn default() -> BytesMut {
+ BytesMut::new()
+ }
+}
+
+impl hash::Hash for BytesMut {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: hash::Hasher,
+ {
+ let s: &[u8] = self.as_ref();
+ s.hash(state);
+ }
+}
+
+impl Borrow<[u8]> for BytesMut {
+ fn borrow(&self) -> &[u8] {
+ self.as_ref()
+ }
+}
+
+impl BorrowMut<[u8]> for BytesMut {
+ fn borrow_mut(&mut self) -> &mut [u8] {
+ self.as_mut()
+ }
+}
+
+impl fmt::Write for BytesMut {
+ #[inline]
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ if self.remaining_mut() >= s.len() {
+ self.put_slice(s.as_bytes());
+ Ok(())
+ } else {
+ Err(fmt::Error)
+ }
+ }
+
+ #[inline]
+ fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
+ fmt::write(self, args)
+ }
+}
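+
+// A hedged usage sketch (not in the original source): because `BytesMut`
+// implements `core::fmt::Write`, formatted text can be appended in place:
+//
+//     use core::fmt::Write;
+//
+//     let mut buf = bytes::BytesMut::new();
+//     write!(buf, "id={}", 42).unwrap();
+//     assert_eq!(&buf[..], b"id=42");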
+
+impl Clone for BytesMut {
+ fn clone(&self) -> BytesMut {
+ BytesMut::from(&self[..])
+ }
+}
+
+impl IntoIterator for BytesMut {
+ type Item = u8;
+ type IntoIter = IntoIter<BytesMut>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self)
+ }
+}
+
+impl<'a> IntoIterator for &'a BytesMut {
+ type Item = &'a u8;
+ type IntoIter = core::slice::Iter<'a, u8>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_ref().iter()
+ }
+}
+
+impl Extend<u8> for BytesMut {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = u8>,
+ {
+ let iter = iter.into_iter();
+
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower);
+
+ // TODO: optimize
+ // 1. If self.kind() == KIND_VEC, use Vec::extend
+ // 2. Make `reserve` inline-able
+ for b in iter {
+ self.reserve(1);
+ self.put_u8(b);
+ }
+ }
+}
+
+impl<'a> Extend<&'a u8> for BytesMut {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = &'a u8>,
+ {
+ self.extend(iter.into_iter().copied())
+ }
+}
+
+impl Extend<Bytes> for BytesMut {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = Bytes>,
+ {
+ for bytes in iter {
+ self.extend_from_slice(&bytes)
+ }
+ }
+}
+
+impl FromIterator<u8> for BytesMut {
+ fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+ BytesMut::from_vec(Vec::from_iter(into_iter))
+ }
+}
+
+impl<'a> FromIterator<&'a u8> for BytesMut {
+ fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
+ BytesMut::from_iter(into_iter.into_iter().copied())
+ }
+}
+
+/*
+ *
+ * ===== Inner =====
+ *
+ */
+
+unsafe fn increment_shared(ptr: *mut Shared) {
+ let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
+
+ if old_size > isize::MAX as usize {
+ crate::abort();
+ }
+}
+
+unsafe fn release_shared(ptr: *mut Shared) {
+ // `Shared` storage... follow the drop steps from Arc.
+ if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
+ return;
+ }
+
+ // This fence is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` fence. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this fence, which happens before the
+ // deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously happened before), and an
+ // > "acquire" operation before deleting the object.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ //
+ // Thread sanitizer does not support atomic fences. Use an atomic load
+ // instead.
+ (*ptr).ref_count.load(Ordering::Acquire);
+
+ // Drop the data
+ drop(Box::from_raw(ptr));
+}
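+
+// A minimal sketch of the release/acquire pattern used in `release_shared`
+// above, written against a hypothetical `refs: AtomicUsize` purely for
+// illustration:
+//
+//     if refs.fetch_sub(1, Ordering::Release) != 1 {
+//         return;                   // other handles are still alive
+//     }
+//     refs.load(Ordering::Acquire); // synchronize with those other drops
+//     // ...now it is safe to free the data...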
+
+impl Shared {
+ fn is_unique(&self) -> bool {
+ // The goal is to check if the current handle is the only handle
+ // that currently has access to the buffer. This is done by
+ // checking if the `ref_count` is currently 1.
+ //
+ // The `Acquire` ordering synchronizes with the `Release` as
+ // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
+ // operation guarantees that any mutations done in other threads
+ // are ordered before the `ref_count` is decremented. As such,
+ // this `Acquire` will guarantee that those mutations are
+ // visible to the current thread.
+ self.ref_count.load(Ordering::Acquire) == 1
+ }
+}
+
+#[inline]
+fn original_capacity_to_repr(cap: usize) -> usize {
+ let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
+ cmp::min(
+ width,
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
+ )
+}
+
+fn original_capacity_from_repr(repr: usize) -> usize {
+ if repr == 0 {
+ return 0;
+ }
+
+ 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_original_capacity_to_repr() {
+ assert_eq!(original_capacity_to_repr(0), 0);
+
+ let max_width = 32;
+
+ for width in 1..(max_width + 1) {
+ let cap = 1 << width - 1;
+
+ let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
+ 0
+ } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
+ width - MIN_ORIGINAL_CAPACITY_WIDTH
+ } else {
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
+ };
+
+ assert_eq!(original_capacity_to_repr(cap), expected);
+
+ if width > 1 {
+ assert_eq!(original_capacity_to_repr(cap + 1), expected);
+ }
+
+ // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
+ if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
+ assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
+ assert_eq!(original_capacity_to_repr(cap + 76), expected);
+ } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
+ assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
+ assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
+ }
+ }
+ }
+
+ #[test]
+ fn test_original_capacity_from_repr() {
+ assert_eq!(0, original_capacity_from_repr(0));
+
+ let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
+
+ assert_eq!(min_cap, original_capacity_from_repr(1));
+ assert_eq!(min_cap * 2, original_capacity_from_repr(2));
+ assert_eq!(min_cap * 4, original_capacity_from_repr(3));
+ assert_eq!(min_cap * 8, original_capacity_from_repr(4));
+ assert_eq!(min_cap * 16, original_capacity_from_repr(5));
+ assert_eq!(min_cap * 32, original_capacity_from_repr(6));
+ assert_eq!(min_cap * 64, original_capacity_from_repr(7));
+ }
+}
+
+unsafe impl Send for BytesMut {}
+unsafe impl Sync for BytesMut {}
+
+/*
+ *
+ * ===== PartialEq / PartialOrd =====
+ *
+ */
+
+impl PartialEq<[u8]> for BytesMut {
+ fn eq(&self, other: &[u8]) -> bool {
+ &**self == other
+ }
+}
+
+impl PartialOrd<[u8]> for BytesMut {
+ fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(other)
+ }
+}
+
+impl PartialEq<BytesMut> for [u8] {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for [u8] {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<str> for BytesMut {
+ fn eq(&self, other: &str) -> bool {
+ &**self == other.as_bytes()
+ }
+}
+
+impl PartialOrd<str> for BytesMut {
+ fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(other.as_bytes())
+ }
+}
+
+impl PartialEq<BytesMut> for str {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for str {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl PartialEq<Vec<u8>> for BytesMut {
+ fn eq(&self, other: &Vec<u8>) -> bool {
+ *self == other[..]
+ }
+}
+
+impl PartialOrd<Vec<u8>> for BytesMut {
+ fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(&other[..])
+ }
+}
+
+impl PartialEq<BytesMut> for Vec<u8> {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for Vec<u8> {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<String> for BytesMut {
+ fn eq(&self, other: &String) -> bool {
+ *self == other[..]
+ }
+}
+
+impl PartialOrd<String> for BytesMut {
+ fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(other.as_bytes())
+ }
+}
+
+impl PartialEq<BytesMut> for String {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for String {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
+where
+ BytesMut: PartialEq<T>,
+{
+ fn eq(&self, other: &&'a T) -> bool {
+ *self == **other
+ }
+}
+
+impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
+where
+ BytesMut: PartialOrd<T>,
+{
+ fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
+ self.partial_cmp(*other)
+ }
+}
+
+impl PartialEq<BytesMut> for &[u8] {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for &[u8] {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<BytesMut> for &str {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for &str {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl PartialEq<BytesMut> for Bytes {
+ fn eq(&self, other: &BytesMut) -> bool {
+ other[..] == self[..]
+ }
+}
+
+impl PartialEq<Bytes> for BytesMut {
+ fn eq(&self, other: &Bytes) -> bool {
+ other[..] == self[..]
+ }
+}
+
+impl From<BytesMut> for Vec<u8> {
+ fn from(bytes: BytesMut) -> Self {
+ let kind = bytes.kind();
+
+ let mut vec = if kind == KIND_VEC {
+ unsafe {
+ let off = bytes.get_vec_pos();
+ rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
+ }
+ } else {
+ let shared = bytes.data as *mut Shared;
+
+ if unsafe { (*shared).is_unique() } {
+ let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
+
+ unsafe { release_shared(shared) };
+
+ vec
+ } else {
+ return bytes.deref().to_vec();
+ }
+ };
+
+ let len = bytes.len;
+
+ unsafe {
+ ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
+ vec.set_len(len);
+ }
+
+ mem::forget(bytes);
+
+ vec
+ }
+}
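+
+// A hedged usage note (not in the original source): when the `BytesMut` is
+// the sole owner of its storage, this conversion reuses the allocation;
+// otherwise it falls back to copying, as in the shared branch above.
+//
+//     let buf = bytes::BytesMut::from(&b"abc"[..]);
+//     let v: Vec<u8> = buf.into(); // KIND_VEC here, so the allocation is reused
+//     assert_eq!(v, b"abc");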
+
+#[inline]
+fn vptr(ptr: *mut u8) -> NonNull<u8> {
+ if cfg!(debug_assertions) {
+ NonNull::new(ptr).expect("Vec pointer should be non-null")
+ } else {
+ unsafe { NonNull::new_unchecked(ptr) }
+ }
+}
+
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+///
+/// It is equivalent to `addr as *mut T`, but that cast fails on miri when
+/// strict provenance checking is enabled.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+ let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
+ debug_assert_eq!(ptr as usize, addr);
+ ptr.cast::<T>()
+}
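+
+// For example, `invalid_ptr::<Shared>(KIND_VEC)` yields a tagged,
+// never-dereferenced pointer that stores only integer data, which keeps
+// miri's strict-provenance mode happy where a plain `as` cast would not.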
+
+/// Precondition: dst >= original
+///
+/// The following line is equivalent to:
+///
+/// ```rust,ignore
+/// self.ptr.as_ptr().offset_from(ptr) as usize;
+/// ```
+///
+/// But since the minimum supported Rust version is 1.39 and
+/// `offset_from` was only stabilized in 1.47, we cannot use it.
+#[inline]
+fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
+ debug_assert!(dst >= original);
+
+ dst as usize - original as usize
+}
+
+unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
+ let ptr = ptr.sub(off);
+ len += off;
+ cap += off;
+
+ Vec::from_raw_parts(ptr, len, cap)
+}
+
+// ===== impl SharedVtable =====
+
+static SHARED_VTABLE: Vtable = Vtable {
+ clone: shared_v_clone,
+ to_vec: shared_v_to_vec,
+ is_unique: crate::bytes::shared_is_unique,
+ drop: shared_v_drop,
+};
+
+unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+ let shared = data.load(Ordering::Relaxed) as *mut Shared;
+ increment_shared(shared);
+
+ let data = AtomicPtr::new(shared as *mut ());
+ Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
+}
+
+unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
+
+ if (*shared).is_unique() {
+ let shared = &mut *shared;
+
+ // Drop shared
+ let mut vec = mem::replace(&mut shared.vec, Vec::new());
+ release_shared(shared);
+
+ // Copy back buffer
+ ptr::copy(ptr, vec.as_mut_ptr(), len);
+ vec.set_len(len);
+
+ vec
+ } else {
+ let v = slice::from_raw_parts(ptr, len).to_vec();
+ release_shared(shared);
+ v
+ }
+}
+
+unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
+ data.with_mut(|shared| {
+ release_shared(*shared as *mut Shared);
+ });
+}
+
+// compile-fails
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = BytesMut::from("hello world");
+/// b1.split_to(6);
+/// }
+/// ```
+fn _split_to_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = BytesMut::from("hello world");
+/// b1.split_off(6);
+/// }
+/// ```
+fn _split_off_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = BytesMut::from("hello world");
+/// b1.split();
+/// }
+/// ```
+fn _split_must_use() {}
+
+// fuzz tests
+#[cfg(all(test, loom))]
+mod fuzz {
+ use loom::sync::Arc;
+ use loom::thread;
+
+ use super::BytesMut;
+ use crate::Bytes;
+
+ #[test]
+ fn bytes_mut_cloning_frozen() {
+ loom::model(|| {
+ let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
+ let addr = a.as_ptr() as usize;
+
+ // test the Bytes::clone is Sync by putting it in an Arc
+ let a1 = Arc::new(a);
+ let a2 = a1.clone();
+
+ let t1 = thread::spawn(move || {
+ let b: Bytes = (*a1).clone();
+ assert_eq!(b.as_ptr() as usize, addr);
+ });
+
+ let t2 = thread::spawn(move || {
+ let b: Bytes = (*a2).clone();
+ assert_eq!(b.as_ptr() as usize, addr);
+ });
+
+ t1.join().unwrap();
+ t2.join().unwrap();
+ });
+ }
+}
+
+use core::fmt::{Debug, Formatter, Result};
+
+use super::BytesRef;
+use crate::{Bytes, BytesMut};
+
+/// An alternative implementation of `std::fmt::Debug` for byte slices.
+///
+/// The standard `Debug` implementation for `[u8]` is a comma-separated
+/// list of numbers. Since a large portion of byte strings are in fact
+/// ASCII strings or contain a lot of ASCII strings (e.g. HTTP), it is
+/// convenient to print strings as ASCII when possible.
+impl Debug for BytesRef<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ write!(f, "b\"")?;
+ for &b in self.0 {
+ // https://doc.rust-lang.org/reference/tokens.html#byte-escapes
+ if b == b'\n' {
+ write!(f, "\\n")?;
+ } else if b == b'\r' {
+ write!(f, "\\r")?;
+ } else if b == b'\t' {
+ write!(f, "\\t")?;
+ } else if b == b'\\' || b == b'"' {
+ write!(f, "\\{}", b as char)?;
+ } else if b == b'\0' {
+ write!(f, "\\0")?;
+ // ASCII printable
+ } else if (0x20..0x7f).contains(&b) {
+ write!(f, "{}", b as char)?;
+ } else {
+ write!(f, "\\x{:02x}", b)?;
+ }
+ }
+ write!(f, "\"")?;
+ Ok(())
+ }
+}
+
+impl Debug for Bytes {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Debug::fmt(&BytesRef(self.as_ref()), f)
+ }
+}
+
+impl Debug for BytesMut {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Debug::fmt(&BytesRef(self.as_ref()), f)
+ }
+}
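+
+// A hedged illustration (not in the original source) of the resulting
+// `Debug` output:
+//
+//     let b = bytes::Bytes::from_static(b"ab\0\xffc");
+//     assert_eq!(format!("{:?}", b), "b\"ab\\0\\xffc\"");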
+
+use core::fmt::{Formatter, LowerHex, Result, UpperHex};
+
+use super::BytesRef;
+use crate::{Bytes, BytesMut};
+
+impl LowerHex for BytesRef<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ for &b in self.0 {
+ write!(f, "{:02x}", b)?;
+ }
+ Ok(())
+ }
+}
+
+impl UpperHex for BytesRef<'_> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ for &b in self.0 {
+ write!(f, "{:02X}", b)?;
+ }
+ Ok(())
+ }
+}
+
+macro_rules! hex_impl {
+ ($tr:ident, $ty:ty) => {
+ impl $tr for $ty {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ $tr::fmt(&BytesRef(self.as_ref()), f)
+ }
+ }
+ };
+}
+
+hex_impl!(LowerHex, Bytes);
+hex_impl!(LowerHex, BytesMut);
+hex_impl!(UpperHex, Bytes);
+hex_impl!(UpperHex, BytesMut);
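+
+// A hedged illustration (not in the original source): the hex impls print
+// each byte as two hex digits with no separator:
+//
+//     let b = bytes::Bytes::from_static(b"\x01\xab");
+//     assert_eq!(format!("{:x}", b), "01ab");
+//     assert_eq!(format!("{:X}", b), "01AB");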
+
+mod debug;
+mod hex;
+
+/// `BytesRef` is not part of the public API of the bytes crate.
+struct BytesRef<'a>(&'a [u8]);
+
+#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
+#![doc(test(
+ no_crate_inject,
+ attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+//! Provides abstractions for working with bytes.
+//!
+//! The `bytes` crate provides an efficient byte buffer structure
+//! ([`Bytes`]) and traits for working with buffer
+//! implementations ([`Buf`], [`BufMut`]).
+//!
+//! # `Bytes`
+//!
+//! `Bytes` is an efficient container for storing and operating on contiguous
+//! slices of memory. It is intended for use primarily in networking code, but
+//! could have applications elsewhere as well.
+//!
+//! `Bytes` values facilitate zero-copy network programming by allowing multiple
+//! `Bytes` objects to point to the same underlying memory. This is managed by
+//! using a reference count to track when the memory is no longer needed and can
+//! be freed.
+//!
+//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]`
+//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For
+//! example:
+//!
+//! ```rust
+//! use bytes::{BytesMut, BufMut};
+//!
+//! let mut buf = BytesMut::with_capacity(1024);
+//! buf.put(&b"hello world"[..]);
+//! buf.put_u16(1234);
+//!
+//! let a = buf.split();
+//! assert_eq!(a, b"hello world\x04\xD2"[..]);
+//!
+//! buf.put(&b"goodbye world"[..]);
+//!
+//! let b = buf.split();
+//! assert_eq!(b, b"goodbye world"[..]);
+//!
+//! assert_eq!(buf.capacity(), 998);
+//! ```
+//!
+//! In the above example, only a single buffer of 1024 is allocated. The handles
+//! `a` and `b` will share the underlying buffer and maintain indices tracking
+//! the view into the buffer represented by the handle.
+//!
+//! See the [struct docs](`Bytes`) for more details.
+//!
+//! # `Buf`, `BufMut`
+//!
+//! These two traits provide read and write access to buffers. The underlying
+//! storage may or may not be in contiguous memory. For example, `Bytes` is a
+//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in
+//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current
+//! position in the underlying byte storage. When bytes are read or written, the
+//! cursor is advanced.
+//!
+//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
+//!
+//! ## Relation with `Read` and `Write`
+//!
+//! At first glance, it may seem that `Buf` and `BufMut` overlap in
+//! functionality with [`std::io::Read`] and [`std::io::Write`]. However, they
+//! serve different purposes. A buffer is the value that is provided as an
+//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then
+//! perform a syscall, which has the potential of failing. Operations on `Buf`
+//! and `BufMut` are infallible.
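+//!
+//! For example (a brief illustrative sketch), reading typed values from a
+//! `Buf` involves no `io::Result`:
+//!
+//! ```rust
+//! use bytes::Buf;
+//!
+//! let mut buf = &b"\x00\x2a rest"[..];
+//! assert_eq!(buf.get_u16(), 42); // integers are read big-endian by default
+//! assert_eq!(buf.chunk(), b" rest");
+//! ```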
+
+extern crate alloc;
+
+#[cfg(feature = "std")]
+extern crate std;
+
+pub mod buf;
+pub use crate::buf::{Buf, BufMut};
+
+mod bytes;
+mod bytes_mut;
+mod fmt;
+mod loom;
+pub use crate::bytes::Bytes;
+pub use crate::bytes_mut::BytesMut;
+
+// Optional Serde support
+#[cfg(feature = "serde")]
+mod serde;
+
+#[inline(never)]
+#[cold]
+fn abort() -> ! {
+ #[cfg(feature = "std")]
+ {
+ std::process::abort();
+ }
+
+ #[cfg(not(feature = "std"))]
+ {
+ struct Abort;
+ impl Drop for Abort {
+ fn drop(&mut self) {
+ panic!();
+ }
+ }
+ let _a = Abort;
+ panic!("abort");
+ }
+}
+
+#[inline(always)]
+#[cfg(feature = "std")]
+fn saturating_sub_usize_u64(a: usize, b: u64) -> usize {
+ use core::convert::TryFrom;
+ match usize::try_from(b) {
+ Ok(b) => a.saturating_sub(b),
+ Err(_) => 0,
+ }
+}
+
+#[inline(always)]
+#[cfg(feature = "std")]
+fn min_u64_usize(a: u64, b: usize) -> usize {
+ use core::convert::TryFrom;
+ match usize::try_from(a) {
+ Ok(a) => usize::min(a, b),
+ Err(_) => b,
+ }
+}
+
+/// Panic with a nice error message.
+#[cold]
+fn panic_advance(idx: usize, len: usize) -> ! {
+ panic!(
+ "advance out of bounds: the len is {} but advancing by {}",
+ len, idx
+ );
+}
+
+#[cold]
+fn panic_does_not_fit(size: usize, nbytes: usize) -> ! {
+ panic!(
+ "size too large: the integer type can fit {} bytes, but nbytes is {}",
+ size, nbytes
+ );
+}
+
+#[cfg(not(all(test, loom)))]
+pub(crate) mod sync {
+ pub(crate) mod atomic {
+ pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+
+ pub(crate) trait AtomicMut<T> {
+ fn with_mut<F, R>(&mut self, f: F) -> R
+ where
+ F: FnOnce(&mut *mut T) -> R;
+ }
+
+ impl<T> AtomicMut<T> for AtomicPtr<T> {
+ fn with_mut<F, R>(&mut self, f: F) -> R
+ where
+ F: FnOnce(&mut *mut T) -> R,
+ {
+ f(self.get_mut())
+ }
+ }
+ }
+}
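+
+// A hedged usage sketch (not in the original source) of the `AtomicMut`
+// helper above: with exclusive access, the pointer can be inspected and
+// updated without atomic operations.
+//
+//     use crate::loom::sync::atomic::{AtomicMut, AtomicPtr};
+//
+//     let mut p: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
+//     p.with_mut(|raw| assert!(raw.is_null()));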
+
+#[cfg(all(test, loom))]
+pub(crate) mod sync {
+ pub(crate) mod atomic {
+ pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+
+ pub(crate) trait AtomicMut<T> {}
+ }
+}
+
+use super::{Bytes, BytesMut};
+use alloc::string::String;
+use alloc::vec::Vec;
+use core::{cmp, fmt};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+macro_rules! serde_impl {
+ ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => {
+ impl Serialize for $ty {
+ #[inline]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ serializer.serialize_bytes(&self)
+ }
+ }
+
+ struct $visitor_ty;
+
+ impl<'de> de::Visitor<'de> for $visitor_ty {
+ type Value = $ty;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("byte array")
+ }
+
+ #[inline]
+ fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
+ where
+ V: de::SeqAccess<'de>,
+ {
+ let len = cmp::min(seq.size_hint().unwrap_or(0), 4096);
+ let mut values: Vec<u8> = Vec::with_capacity(len);
+
+ while let Some(value) = seq.next_element()? {
+ values.push(value);
+ }
+
+ Ok($ty::$from_vec(values))
+ }
+
+ #[inline]
+ fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
+ where
+ E: de::Error,
+ {
+ Ok($ty::$from_slice(v))
+ }
+
+ #[inline]
+ fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
+ where
+ E: de::Error,
+ {
+ Ok($ty::$from_vec(v))
+ }
+
+ #[inline]
+ fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+ where
+ E: de::Error,
+ {
+ Ok($ty::$from_slice(v.as_bytes()))
+ }
+
+ #[inline]
+ fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
+ where
+ E: de::Error,
+ {
+ Ok($ty::$from_vec(v.into_bytes()))
+ }
+ }
+
+ impl<'de> Deserialize<'de> for $ty {
+ #[inline]
+ fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_byte_buf($visitor_ty)
+ }
+ }
+ };
+}
+
+serde_impl!(Bytes, BytesVisitor, copy_from_slice, from);
+serde_impl!(BytesMut, BytesMutVisitor, from, from_vec);
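+
+// A hedged round-trip sketch (assumes the `serde_json` crate, which is not
+// a dependency of this module; for illustration only):
+//
+//     let b = Bytes::from_static(b"hi");
+//     let json = serde_json::to_string(&b).unwrap(); // "[104,105]"
+//     let back: Bytes = serde_json::from_str(&json).unwrap();
+//     assert_eq!(b, back);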
+