From c5adc1eef500a1ec50c4f333d140249a5e83e6db Mon Sep 17 00:00:00 2001
From: Daniel Eades
Date: Mon, 22 Dec 2025 08:29:40 +0000
Subject: [PATCH] style: address some clippy warnings

---
 benches/buf.rs          | 12 +++----
 benches/bytes.rs        | 20 +++++------
 benches/bytes_mut.rs    | 22 ++++++------
 src/buf/buf_mut.rs      | 58 +++++++++++++++---------------
 src/buf/chain.rs        |  6 ++--
 src/buf/iter.rs         |  4 +--
 src/buf/limit.rs        |  2 +-
 src/buf/reader.rs       |  2 +-
 src/buf/take.rs         |  2 +-
 src/buf/uninit_slice.rs | 30 ++++++++++++----
 src/bytes.rs            | 76 +++++++++++++++++++--------------------
 src/bytes_mut.rs        | 80 +++++++++++++++++++++--------------------
 src/lib.rs              |  2 +-
 tests/test_bytes.rs     | 56 +++++++++++++++--------------
 14 files changed, 196 insertions(+), 176 deletions(-)

diff --git a/benches/buf.rs b/benches/buf.rs
index 616d18748..69ffdbf4f 100644
--- a/benches/buf.rs
+++ b/benches/buf.rs
@@ -16,8 +16,8 @@ struct TestBuf {
     readlen: usize,
 }
 impl TestBuf {
-    fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf {
-        let mut buf = TestBuf {
+    fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> Self {
+        let mut buf = Self {
             buf,
             readlens,
             init_pos,
@@ -68,13 +68,13 @@ struct TestBufC {
     inner: TestBuf,
 }
 impl TestBufC {
-    fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC {
-        TestBufC {
+    fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> Self {
+        Self {
             inner: TestBuf::new(buf, readlens, init_pos),
         }
     }
     fn reset(&mut self) {
-        self.inner.reset()
+        self.inner.reset();
     }
 }
 impl Buf for TestBufC {
@@ -84,7 +84,7 @@ impl Buf for TestBufC {
     }
     #[inline(never)]
     fn advance(&mut self, cnt: usize) {
-        self.inner.advance(cnt)
+        self.inner.advance(cnt);
     }
     #[inline(never)]
     fn chunk(&self) -> &[u8] {
diff --git a/benches/bytes.rs b/benches/bytes.rs
index 8782d0066..4fc5316d7 100644
--- a/benches/bytes.rs
+++ b/benches/bytes.rs
@@ -14,7 +14,7 @@ fn deref_unique(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
-    })
+    });
 }
 
 #[bench]
@@ -26,7 +26,7 @@ fn deref_shared(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
-    })
+    });
 }
 
 #[bench]
@@ -37,7 +37,7 @@ fn deref_static(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
-    })
+    });
 }
 
 #[bench]
@@ -49,7 +49,7 @@ fn clone_static(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(test::black_box(&bytes).clone());
         }
-    })
+    });
 }
 
 #[bench]
@@ -60,7 +60,7 @@ fn clone_shared(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(test::black_box(&bytes).clone());
         }
-    })
+    });
 }
 
 #[bench]
@@ -72,7 +72,7 @@ fn clone_arc_vec(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(test::black_box(&bytes).clone());
         }
-    })
+    });
 }
 
 #[bench]
@@ -82,7 +82,7 @@ fn from_long_slice(b: &mut Bencher) {
     b.iter(|| {
         let buf = Bytes::copy_from_slice(&data[..]);
         test::black_box(buf);
-    })
+    });
 }
 
 #[bench]
@@ -93,7 +93,7 @@ fn slice_empty(b: &mut Bencher) {
         for i in 0..1000 {
             test::black_box(b.slice(i % 100..i % 100));
         }
-    })
+    });
 }
 
 #[bench]
@@ -104,7 +104,7 @@ fn slice_short_from_arc(b: &mut Bencher) {
         for i in 0..1000 {
             test::black_box(b.slice(1..2 + i % 10));
         }
-    })
+    });
 }
 
 #[bench]
@@ -116,5 +116,5 @@ fn split_off_and_drop(b: &mut Bencher) {
             test::black_box(b.split_off(100));
             test::black_box(b);
         }
-    })
+    });
 }
diff --git a/benches/bytes_mut.rs b/benches/bytes_mut.rs
index b06943621..54725c8ec 100644
--- a/benches/bytes_mut.rs
+++ b/benches/bytes_mut.rs
@@ -12,21 +12,21 @@ fn alloc_small(b: &mut Bencher) {
     b.iter(|| {
         for _ in 0..1024 {
            test::black_box(BytesMut::with_capacity(12));
         }
-    })
+    });
 }
 
 #[bench]
 fn alloc_mid(b: &mut Bencher) {
     b.iter(|| {
         test::black_box(BytesMut::with_capacity(128));
-    })
+    });
 }
 
 #[bench]
 fn alloc_big(b: &mut Bencher) {
     b.iter(|| {
         test::black_box(BytesMut::with_capacity(4096));
-    })
+    });
 }
 
 #[bench]
@@ -38,7 +38,7 @@ fn deref_unique(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
-    })
+    });
 }
 
 #[bench]
@@ -57,7 +57,7 @@ fn deref_unique_unroll(b: &mut Bencher) {
             test::black_box(&buf[..]);
             test::black_box(&buf[..]);
             test::black_box(&buf[..]);
         }
-    })
+    });
 }
 
 #[bench]
@@ -70,7 +70,7 @@ fn deref_shared(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
-    })
+    });
 }
 
 #[bench]
@@ -86,7 +86,7 @@ fn deref_two(b: &mut Bencher) {
             test::black_box(&buf1[..]);
             test::black_box(&buf2[..]);
         }
-    })
+    });
 }
 
 #[bench]
@@ -99,7 +99,7 @@ fn clone_frozen(b: &mut Bencher) {
         for _ in 0..1024 {
             test::black_box(&bytes.clone());
         }
-    })
+    });
 }
 
 #[bench]
@@ -108,7 +108,7 @@ fn alloc_write_split_to_mid(b: &mut Bencher) {
         let mut buf = BytesMut::with_capacity(128);
         buf.put_slice(&[0u8; 64]);
         test::black_box(buf.split_to(64));
-    })
+    });
 }
 
 #[bench]
@@ -125,7 +125,7 @@ fn drain_write_drain(b: &mut Bencher) {
         }
 
         test::black_box(parts);
-    })
+    });
 }
 
 #[bench]
@@ -141,7 +141,7 @@ fn fmt_write(b: &mut Bencher) {
         unsafe {
             buf.set_len(0);
         }
-    })
+    });
 }
 
 #[bench]
diff --git a/src/buf/buf_mut.rs b/src/buf/buf_mut.rs
index 3154dfeca..19f9057d6 100644
--- a/src/buf/buf_mut.rs
+++ b/src/buf/buf_mut.rs
@@ -353,7 +353,7 @@ pub unsafe trait BufMut {
     #[inline]
     fn put_i8(&mut self, n: i8) {
         let src = [n as u8];
-        self.put_slice(&src)
+        self.put_slice(&src);
     }
 
     /// Writes an unsigned 16 bit integer to `self` in big-endian byte order.
@@ -376,7 +376,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u16(&mut self, n: u16) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes an unsigned 16 bit integer to `self` in little-endian byte order.
@@ -399,7 +399,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u16_le(&mut self, n: u16) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes an unsigned 16 bit integer to `self` in native-endian byte order.
@@ -426,7 +426,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u16_ne(&mut self, n: u16) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes a signed 16 bit integer to `self` in big-endian byte order.
@@ -449,7 +449,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i16(&mut self, n: i16) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes a signed 16 bit integer to `self` in little-endian byte order.
@@ -472,7 +472,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i16_le(&mut self, n: i16) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes a signed 16 bit integer to `self` in native-endian byte order.
@@ -499,7 +499,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i16_ne(&mut self, n: i16) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes an unsigned 32 bit integer to `self` in big-endian byte order.
@@ -522,7 +522,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u32(&mut self, n: u32) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes an unsigned 32 bit integer to `self` in little-endian byte order.
@@ -545,7 +545,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u32_le(&mut self, n: u32) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes an unsigned 32 bit integer to `self` in native-endian byte order.
@@ -572,7 +572,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u32_ne(&mut self, n: u32) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes a signed 32 bit integer to `self` in big-endian byte order.
@@ -595,7 +595,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i32(&mut self, n: i32) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes a signed 32 bit integer to `self` in little-endian byte order.
@@ -618,7 +618,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i32_le(&mut self, n: i32) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes a signed 32 bit integer to `self` in native-endian byte order.
@@ -645,7 +645,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i32_ne(&mut self, n: i32) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order.
@@ -668,7 +668,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u64(&mut self, n: u64) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes an unsigned 64 bit integer to `self` in little-endian byte order.
@@ -691,7 +691,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u64_le(&mut self, n: u64) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes an unsigned 64 bit integer to `self` in native-endian byte order.
@@ -718,7 +718,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u64_ne(&mut self, n: u64) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes a signed 64 bit integer to `self` in the big-endian byte order.
@@ -741,7 +741,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i64(&mut self, n: i64) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes a signed 64 bit integer to `self` in little-endian byte order.
@@ -764,7 +764,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i64_le(&mut self, n: i64) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes a signed 64 bit integer to `self` in native-endian byte order.
@@ -791,7 +791,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i64_ne(&mut self, n: i64) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order.
@@ -814,7 +814,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u128(&mut self, n: u128) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes an unsigned 128 bit integer to `self` in little-endian byte order.
@@ -837,7 +837,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u128_le(&mut self, n: u128) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes an unsigned 128 bit integer to `self` in native-endian byte order.
@@ -864,7 +864,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_u128_ne(&mut self, n: u128) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes a signed 128 bit integer to `self` in the big-endian byte order.
@@ -887,7 +887,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i128(&mut self, n: i128) {
-        self.put_slice(&n.to_be_bytes())
+        self.put_slice(&n.to_be_bytes());
     }
 
     /// Writes a signed 128 bit integer to `self` in little-endian byte order.
@@ -910,7 +910,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i128_le(&mut self, n: i128) {
-        self.put_slice(&n.to_le_bytes())
+        self.put_slice(&n.to_le_bytes());
     }
 
     /// Writes a signed 128 bit integer to `self` in native-endian byte order.
@@ -937,7 +937,7 @@ pub unsafe trait BufMut {
     /// `self`.
     #[inline]
     fn put_i128_ne(&mut self, n: i128) {
-        self.put_slice(&n.to_ne_bytes())
+        self.put_slice(&n.to_ne_bytes());
     }
 
     /// Writes an unsigned n-byte integer to `self` in big-endian byte order.
@@ -1022,9 +1022,9 @@ pub unsafe trait BufMut {
     #[inline]
     fn put_uint_ne(&mut self, n: u64, nbytes: usize) {
         if cfg!(target_endian = "big") {
-            self.put_uint(n, nbytes)
+            self.put_uint(n, nbytes);
         } else {
-            self.put_uint_le(n, nbytes)
+            self.put_uint_le(n, nbytes);
         }
     }
 
@@ -1110,9 +1110,9 @@ pub unsafe trait BufMut {
     #[inline]
     fn put_int_ne(&mut self, n: i64, nbytes: usize) {
         if cfg!(target_endian = "big") {
-            self.put_int(n, nbytes)
+            self.put_int(n, nbytes);
         } else {
-            self.put_int_le(n, nbytes)
+            self.put_int_le(n, nbytes);
         }
     }
 
diff --git a/src/buf/chain.rs b/src/buf/chain.rs
index c8bc36de9..5c0d3f11e 100644
--- a/src/buf/chain.rs
+++ b/src/buf/chain.rs
@@ -34,8 +34,8 @@ pub struct Chain<T, U> {
 
 impl<T, U> Chain<T, U> {
     /// Creates a new `Chain` sequencing the provided values.
-    pub(crate) fn new(a: T, b: U) -> Chain<T, U> {
-        Chain { a, b }
+    pub(crate) fn new(a: T, b: U) -> Self {
+        Self { a, b }
     }
 
     /// Gets a reference to the first underlying `Buf`.
@@ -232,7 +232,7 @@ where
     U: Buf,
 {
     type Item = u8;
-    type IntoIter = IntoIter<Chain<T, U>>;
+    type IntoIter = IntoIter<Self>;
 
     fn into_iter(self) -> Self::IntoIter {
         IntoIter::new(self)
diff --git a/src/buf/iter.rs b/src/buf/iter.rs
index 74f9b991e..b9de74cfc 100644
--- a/src/buf/iter.rs
+++ b/src/buf/iter.rs
@@ -38,8 +38,8 @@ impl<T> IntoIter<T> {
     /// assert_eq!(iter.next(), Some(b'c'));
     /// assert_eq!(iter.next(), None);
     /// ```
-    pub fn new(inner: T) -> IntoIter<T> {
-        IntoIter { inner }
+    pub fn new(inner: T) -> Self {
+        Self { inner }
     }
 
     /// Consumes this `IntoIter`, returning the underlying value.
diff --git a/src/buf/limit.rs b/src/buf/limit.rs
index b422be538..ecfe3b306 100644
--- a/src/buf/limit.rs
+++ b/src/buf/limit.rs
@@ -52,7 +52,7 @@ impl<T> Limit<T> {
     /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual
     /// number of available bytes.
     pub fn set_limit(&mut self, lim: usize) {
-        self.limit = lim
+        self.limit = lim;
     }
 }
diff --git a/src/buf/reader.rs b/src/buf/reader.rs
index 521494958..343d065be 100644
--- a/src/buf/reader.rs
+++ b/src/buf/reader.rs
@@ -76,6 +76,6 @@ impl<B: Buf + Sized> io::BufRead for Reader<B> {
         Ok(self.buf.chunk())
     }
     fn consume(&mut self, amt: usize) {
-        self.buf.advance(amt)
+        self.buf.advance(amt);
     }
 }
diff --git a/src/buf/take.rs b/src/buf/take.rs
index d8621f3f3..3349d52fb 100644
--- a/src/buf/take.rs
+++ b/src/buf/take.rs
@@ -128,7 +128,7 @@ impl<T> Take<T> {
     /// assert_eq!(*dst, b"llo"[..]);
     /// ```
     pub fn set_limit(&mut self, lim: usize) {
-        self.limit = lim
+        self.limit = lim;
     }
 }
 
diff --git a/src/buf/uninit_slice.rs b/src/buf/uninit_slice.rs
index aea096ae6..b8cffeba5 100644
--- a/src/buf/uninit_slice.rs
+++ b/src/buf/uninit_slice.rs
@@ -33,8 +33,8 @@ impl UninitSlice {
     /// let slice = UninitSlice::new(&mut buffer[..]);
     /// ```
     #[inline]
-    pub fn new(slice: &mut [u8]) -> &mut UninitSlice {
-        unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+    pub fn new(slice: &mut [u8]) -> &mut Self {
+        unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit<u8>] as *mut Self) }
     }
 
     /// Creates a `&mut UninitSlice` wrapping a slice of uninitialised memory.
@@ -52,12 +52,12 @@ impl UninitSlice {
     /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into();
     /// ```
     #[inline]
-    pub fn uninit(slice: &mut [MaybeUninit<u8>]) -> &mut UninitSlice {
-        unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+    pub fn uninit(slice: &mut [MaybeUninit<u8>]) -> &mut Self {
+        unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut Self) }
     }
 
-    fn uninit_ref(slice: &[MaybeUninit<u8>]) -> &UninitSlice {
-        unsafe { &*(slice as *const [MaybeUninit<u8>] as *const UninitSlice) }
+    fn uninit_ref(slice: &[MaybeUninit<u8>]) -> &Self {
+        unsafe { &*(slice as *const [MaybeUninit<u8>] as *const Self) }
     }
 
     /// Create a `&mut UninitSlice` from a pointer and a length.
@@ -79,7 +79,7 @@ impl UninitSlice {
     /// let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) };
     /// ```
     #[inline]
-    pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice {
+    pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut Self {
         let maybe_init: &mut [MaybeUninit<u8>] =
             core::slice::from_raw_parts_mut(ptr as *mut _, len);
         Self::uninit(maybe_init)
@@ -205,6 +205,22 @@ impl UninitSlice {
     pub fn len(&self) -> usize {
         self.0.len()
     }
+
+    /// Returns whether a slice is empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut data = [0u8; 0];
+    /// let mut slice = &mut data[..];
+    /// assert!(BufMut::chunk_mut(&mut slice).is_empty());
+    /// ```
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
 }
 
 impl fmt::Debug for UninitSlice {
diff --git a/src/bytes.rs b/src/bytes.rs
index 441ca80d7..f2adb2dd7 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -139,7 +139,7 @@ impl Bytes {
         // Make it a named const to work around
         // "unsizing casts are not allowed in const fn"
         const EMPTY: &[u8] = &[];
-        Bytes::from_static(EMPTY)
+        Self::from_static(EMPTY)
     }
 
     /// Creates a new empty `Bytes`.
@@ -165,7 +165,7 @@ impl Bytes {
     #[inline]
     #[cfg(not(all(loom, test)))]
     pub const fn from_static(bytes: &'static [u8]) -> Self {
-        Bytes {
+        Self {
             ptr: bytes.as_ptr(),
             len: bytes.len(),
             data: AtomicPtr::new(ptr::null_mut()),
@@ -192,7 +192,7 @@ impl Bytes {
         // to the provenance of the fake ZST [u8;0] at the same address.
         let ptr = without_provenance(ptr as usize);
 
-        Bytes {
+        Self {
             ptr,
             len: 0,
             data: AtomicPtr::new(ptr::null_mut()),
@@ -271,7 +271,7 @@ impl Bytes {
             owner,
         }));
 
-        let mut ret = Bytes {
+        let mut ret = Self {
             ptr: NonNull::dangling().as_ptr(),
             len: 0,
             data: AtomicPtr::new(owned.cast()),
@@ -397,7 +397,7 @@ impl Bytes {
         );
 
         if end == begin {
-            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(begin));
+            return Self::new_empty_with_ptr(self.ptr.wrapping_add(begin));
         }
 
         let mut ret = self.clone();
@@ -437,7 +437,7 @@ impl Bytes {
         // Empty slice and empty Bytes may have their pointers reset
         // so explicitly allow empty slice to be a subslice of any slice.
         if subset.is_empty() {
-            return Bytes::new();
+            return Self::new();
         }
 
         let bytes_p = self.as_ptr() as usize;
@@ -494,11 +494,11 @@ impl Bytes {
     #[must_use = "consider Bytes::truncate if you don't need the other half"]
     pub fn split_off(&mut self, at: usize) -> Self {
         if at == self.len() {
-            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at));
+            return Self::new_empty_with_ptr(self.ptr.wrapping_add(at));
         }
 
         if at == 0 {
-            return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr));
+            return mem::replace(self, Self::new_empty_with_ptr(self.ptr));
         }
 
         assert!(
@@ -544,11 +544,11 @@ impl Bytes {
     pub fn split_to(&mut self, at: usize) -> Self {
         if at == self.len() {
             let end_ptr = self.ptr.wrapping_add(at);
-            return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr));
+            return mem::replace(self, Self::new_empty_with_ptr(end_ptr));
         }
 
         if at == 0 {
-            return Bytes::new_empty_with_ptr(self.ptr);
+            return Self::new_empty_with_ptr(self.ptr);
         }
 
         assert!(
@@ -624,7 +624,7 @@ impl Bytes {
     /// and return self.
     ///
     /// This will also always fail if the buffer was constructed via either
-    /// [from_owner](Bytes::from_owner) or [from_static](Bytes::from_static).
+    /// [`from_owner`](Bytes::from_owner) or [`from_static`](Bytes::from_static).
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::{Bytes, BytesMut};
     ///
     /// let bytes = Bytes::from(b"hello".to_vec());
     /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
     /// ```
-    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
+    pub fn try_into_mut(self) -> Result<BytesMut, Self> {
         if self.is_unique() {
             Ok(self.into())
         } else {
@@ -648,8 +648,8 @@ impl Bytes {
         len: usize,
         data: AtomicPtr<()>,
         vtable: &'static Vtable,
-    ) -> Bytes {
-        Bytes {
+    ) -> Self {
+        Self {
             ptr,
             len,
             data,
@@ -686,7 +686,7 @@ impl Drop for Bytes {
 
 impl Clone for Bytes {
     #[inline]
-    fn clone(&self) -> Bytes {
+    fn clone(&self) -> Self {
         unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
     }
 }
@@ -754,7 +754,7 @@ impl Borrow<[u8]> for Bytes {
 
 impl IntoIterator for Bytes {
     type Item = u8;
-    type IntoIter = IntoIter<Bytes>;
+    type IntoIter = IntoIter<Self>;
 
     fn into_iter(self) -> Self::IntoIter {
         IntoIter::new(self)
@@ -779,19 +779,19 @@ impl FromIterator<u8> for Bytes {
 // impl Eq
 
 impl PartialEq for Bytes {
-    fn eq(&self, other: &Bytes) -> bool {
+    fn eq(&self, other: &Self) -> bool {
         self.as_slice() == other.as_slice()
     }
 }
 
 impl PartialOrd for Bytes {
-    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
         Some(self.cmp(other))
     }
 }
 
 impl Ord for Bytes {
-    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
         self.as_slice().cmp(other.as_slice())
     }
 }
@@ -920,7 +920,7 @@ impl PartialOrd<Bytes> for &str {
 
 impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
 where
-    Bytes: PartialEq<T>,
+    Self: PartialEq<T>,
 {
     fn eq(&self, other: &&'a T) -> bool {
         *self == **other
@@ -929,7 +929,7 @@ where
 
 impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
 where
-    Bytes: PartialOrd<T>,
+    Self: PartialOrd<T>,
 {
     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
         self.partial_cmp(&**other)
@@ -940,25 +940,25 @@ where
 
 impl Default for Bytes {
     #[inline]
-    fn default() -> Bytes {
-        Bytes::new()
+    fn default() -> Self {
+        Self::new()
     }
 }
 
 impl From<&'static [u8]> for Bytes {
-    fn from(slice: &'static [u8]) -> Bytes {
-        Bytes::from_static(slice)
+    fn from(slice: &'static [u8]) -> Self {
+        Self::from_static(slice)
     }
 }
 
 impl From<&'static str> for Bytes {
-    fn from(slice: &'static str) -> Bytes {
-        Bytes::from_static(slice.as_bytes())
+    fn from(slice: &'static str) -> Self {
+        Self::from_static(slice.as_bytes())
     }
 }
 
 impl From<Vec<u8>> for Bytes {
-    fn from(vec: Vec<u8>) -> Bytes {
+    fn from(vec: Vec<u8>) -> Self {
         let mut vec = ManuallyDrop::new(vec);
         let ptr = vec.as_mut_ptr();
         let len = vec.len();
@@ -967,7 +967,7 @@ impl From<Vec<u8>> for Bytes {
         // Avoid an extra allocation if possible.
         if len == cap {
             let vec = ManuallyDrop::into_inner(vec);
-            return Bytes::from(vec.into_boxed_slice());
+            return Self::from(vec.into_boxed_slice());
         }
 
         let shared = Box::new(Shared {
@@ -983,7 +983,7 @@ impl From<Vec<u8>> for Bytes {
             0 == (shared as usize & KIND_MASK),
             "internal: Box<Shared> should have an aligned pointer",
         );
-        Bytes {
+        Self {
             ptr,
             len,
             data: AtomicPtr::new(shared as _),
@@ -993,12 +993,12 @@ impl From<Vec<u8>> for Bytes {
 }
 
 impl From<Box<[u8]>> for Bytes {
-    fn from(slice: Box<[u8]>) -> Bytes {
+    fn from(slice: Box<[u8]>) -> Self {
         // Box<[u8]> doesn't contain a heap allocation for empty slices,
         // so the pointer isn't aligned enough for the KIND_VEC stashing to
         // work.
         if slice.is_empty() {
-            return Bytes::new();
+            return Self::new();
         }
 
         let len = slice.len();
@@ -1006,14 +1006,14 @@ impl From<Box<[u8]>> for Bytes {
         if ptr as usize & 0x1 == 0 {
             let data = ptr_map(ptr, |addr| addr | KIND_VEC);
-            Bytes {
+            Self {
                 ptr,
                 len,
                 data: AtomicPtr::new(data.cast()),
                 vtable: &PROMOTABLE_EVEN_VTABLE,
             }
         } else {
-            Bytes {
+            Self {
                 ptr,
                 len,
                 data: AtomicPtr::new(ptr.cast()),
@@ -1046,13 +1046,13 @@ impl From<Bytes> for BytesMut {
 }
 
 impl From<String> for Bytes {
-    fn from(s: String) -> Bytes {
-        Bytes::from(s.into_bytes())
+    fn from(s: String) -> Self {
+        Self::from(s.into_bytes())
     }
 }
 
 impl From<Bytes> for Vec<u8> {
-    fn from(bytes: Bytes) -> Vec<u8> {
+    fn from(bytes: Bytes) -> Self {
         let bytes = ManuallyDrop::new(bytes);
         unsafe { (bytes.vtable.into_vec)(&bytes.data, bytes.ptr, bytes.len) }
     }
@@ -1335,7 +1335,7 @@ unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
 
 unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
     let cap = offset.offset_from(buf) as usize + len;
-    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
+    dealloc(buf, Layout::from_size_align(cap, 1).unwrap());
 }
 
 // ===== impl SharedVtable =====
diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs
index fe6dbc733..404c75793 100644
--- a/src/bytes_mut.rs
+++ b/src/bytes_mut.rs
@@ -1,3 +1,5 @@
+#![allow(clippy::items_after_test_module)]
+
 use core::mem::{self, ManuallyDrop, MaybeUninit};
 use core::ops::{Deref, DerefMut};
 use core::ptr::{self, NonNull};
@@ -144,8 +146,8 @@ impl BytesMut {
     /// assert_eq!(&bytes[..], b"hello world");
     /// ```
     #[inline]
-    pub fn with_capacity(capacity: usize) -> BytesMut {
-        BytesMut::from_vec(Vec::with_capacity(capacity))
+    pub fn with_capacity(capacity: usize) -> Self {
+        Self::from_vec(Vec::with_capacity(capacity))
     }
 
     /// Creates a new `BytesMut` with default capacity.
@@ -168,8 +170,8 @@ impl BytesMut {
     /// assert_eq!(&b"xy"[..], &bytes[..]);
     /// ```
     #[inline]
-    pub fn new() -> BytesMut {
-        BytesMut::with_capacity(0)
+    pub fn new() -> Self {
+        Self::with_capacity(0)
     }
 
     /// Returns the number of bytes contained in this `BytesMut`.
@@ -283,8 +285,8 @@ impl BytesMut {
     /// assert_eq!(zeros.len(), 42);
     /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
     /// ```
-    pub fn zeroed(len: usize) -> BytesMut {
-        BytesMut::from_vec(vec![0; len])
+    pub fn zeroed(len: usize) -> Self {
+        Self::from_vec(vec![0; len])
     }
 
     /// Splits the bytes into two at the given index.
@@ -316,7 +318,7 @@ impl BytesMut {
     ///
     /// Panics if `at > capacity`.
     #[must_use = "consider BytesMut::truncate if you don't need the other half"]
-    pub fn split_off(&mut self, at: usize) -> BytesMut {
+    pub fn split_off(&mut self, at: usize) -> Self {
         assert!(
             at <= self.capacity(),
             "split_off out of bounds: {:?} <= {:?}",
@@ -359,7 +361,7 @@ impl BytesMut {
     /// assert_eq!(other, b"hello world"[..]);
     /// ```
     #[must_use = "consider BytesMut::clear if you don't need the other half"]
-    pub fn split(&mut self) -> BytesMut {
+    pub fn split(&mut self) -> Self {
         let len = self.len();
         self.split_to(len)
     }
@@ -391,7 +393,7 @@ impl BytesMut {
     ///
     /// Panics if `at > len`.
     #[must_use = "consider BytesMut::advance if you don't need the other half"]
-    pub fn split_to(&mut self, at: usize) -> BytesMut {
+    pub fn split_to(&mut self, at: usize) -> Self {
         assert!(
             at <= self.len(),
             "split_to out of bounds: {:?} <= {:?}",
@@ -418,7 +420,7 @@ impl BytesMut {
     ///
     /// Existing underlying capacity is preserved.
     ///
-    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
+    /// The [`split_off`](Self::split_off) method can emulate `truncate`, but this causes the
     /// excess bytes to be returned instead of dropped.
     ///
     /// # Examples
@@ -904,7 +906,7 @@ impl BytesMut {
     /// buf.unsplit(split);
     /// assert_eq!(b"aaabbbcccddd", &buf[..]);
     /// ```
-    pub fn unsplit(&mut self, other: BytesMut) {
+    pub fn unsplit(&mut self, other: Self) {
         if self.is_empty() {
             *self = other;
             return;
@@ -924,7 +926,7 @@ impl BytesMut {
     // internal change could make a simple pattern (`BytesMut::from(vec)`)
     // suddenly a lot more expensive.
     #[inline]
-    pub(crate) fn from_vec(vec: Vec<u8>) -> BytesMut {
+    pub(crate) fn from_vec(vec: Vec<u8>) -> Self {
         let mut vec = ManuallyDrop::new(vec);
         let ptr = vptr(vec.as_mut_ptr());
         let len = vec.len();
@@ -933,7 +935,7 @@ impl BytesMut {
         let original_capacity_repr = original_capacity_to_repr(cap);
         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
 
-        BytesMut {
+        Self {
             ptr,
             len,
             cap,
@@ -993,7 +995,7 @@ impl BytesMut {
         self.cap -= count;
     }
 
-    fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
+    fn try_unsplit(&mut self, other: Self) -> Result<(), Self> {
         if other.capacity() == 0 {
             return Ok(());
         }
@@ -1058,14 +1060,13 @@ impl BytesMut {
     /// be sure the returned value to the user doesn't allow
     /// two views into the same range.
     #[inline]
-    unsafe fn shallow_clone(&mut self) -> BytesMut {
+    unsafe fn shallow_clone(&mut self) -> Self {
         if self.kind() == KIND_ARC {
             increment_shared(self.data);
-            ptr::read(self)
         } else {
             self.promote_to_shared(/*ref_count = */ 2);
-            ptr::read(self)
         }
+        ptr::read(self)
     }
 
     #[inline]
@@ -1209,7 +1210,8 @@ unsafe impl BufMut for BytesMut {
         if !src.has_remaining() {
             // prevent calling `copy_to_bytes`->`put`->`copy_to_bytes` infintely when src is empty
             return;
-        } else if self.capacity() == 0 {
+        }
+        if self.capacity() == 0 {
             // When capacity is zero, try reusing allocation of `src`.
             let src_copy = src.copy_to_bytes(src.remaining());
             drop(src);
@@ -1279,37 +1281,37 @@ impl DerefMut for BytesMut {
 }
 
 impl<'a> From<&'a [u8]> for BytesMut {
-    fn from(src: &'a [u8]) -> BytesMut {
-        BytesMut::from_vec(src.to_vec())
+    fn from(src: &'a [u8]) -> Self {
+        Self::from_vec(src.to_vec())
     }
 }
 
 impl<'a> From<&'a str> for BytesMut {
-    fn from(src: &'a str) -> BytesMut {
-        BytesMut::from(src.as_bytes())
+    fn from(src: &'a str) -> Self {
+        Self::from(src.as_bytes())
     }
 }
 
 impl From<BytesMut> for Bytes {
-    fn from(src: BytesMut) -> Bytes {
+    fn from(src: BytesMut) -> Self {
         src.freeze()
     }
 }
 
 impl PartialEq for BytesMut {
-    fn eq(&self, other: &BytesMut) -> bool {
+    fn eq(&self, other: &Self) -> bool {
         self.as_slice() == other.as_slice()
     }
 }
 
 impl PartialOrd for BytesMut {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
         Some(self.cmp(other))
     }
 }
 
 impl Ord for BytesMut {
-    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
         self.as_slice().cmp(other.as_slice())
     }
 }
@@ -1318,8 +1320,8 @@ impl Eq for BytesMut {}
 
 impl Default for BytesMut {
     #[inline]
-    fn default() -> BytesMut {
-        BytesMut::new()
+    fn default() -> Self {
+        Self::new()
     }
 }
 
@@ -1363,14 +1365,14 @@ impl fmt::Write for BytesMut {
 }
 
 impl Clone for BytesMut {
-    fn clone(&self) -> BytesMut {
-        BytesMut::from(&self[..])
+    fn clone(&self) -> Self {
+        Self::from(&self[..])
     }
 }
 
 impl IntoIterator for BytesMut {
     type Item = u8;
-    type IntoIter = IntoIter<BytesMut>;
+    type IntoIter = IntoIter<Self>;
 
     fn into_iter(self) -> Self::IntoIter {
         IntoIter::new(self)
@@ -1409,7 +1411,7 @@ impl<'a> Extend<&'a u8> for BytesMut {
     where
         T: IntoIterator<Item = &'a u8>,
     {
-        self.extend(iter.into_iter().copied())
+        self.extend(iter.into_iter().copied());
     }
 }
 
@@ -1419,20 +1421,20 @@ impl Extend<Bytes> for BytesMut {
         T: IntoIterator<Item = Bytes>,
     {
         for bytes in iter {
-            self.extend_from_slice(&bytes)
+            self.extend_from_slice(&bytes);
         }
     }
 }
 
 impl FromIterator<u8> for BytesMut {
     fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
-        BytesMut::from_vec(Vec::from_iter(into_iter))
+        Self::from_vec(Vec::from_iter(into_iter))
     }
 }
 
 impl<'a> FromIterator<&'a u8> for BytesMut {
     fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
-        BytesMut::from_iter(into_iter.into_iter().copied())
+        Self::from_iter(into_iter.into_iter().copied())
     }
 }
 
@@ -1525,8 +1527,8 @@ mod tests {
 
         let max_width = 32;
 
-        for width in 1..(max_width + 1) {
-            let cap = 1 << width - 1;
+        for width in 1..=max_width {
+            let cap = 1 << (width - 1);
 
             let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
                 0
@@ -1676,7 +1678,7 @@ impl PartialOrd<BytesMut> for String {
 
 impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
 where
-    BytesMut: PartialEq<T>,
+    Self: PartialEq<T>,
 {
     fn eq(&self, other: &&'a T) -> bool {
         *self == **other
@@ -1685,7 +1687,7 @@ where
 
 impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
 where
-    BytesMut: PartialOrd<T>,
+    Self: PartialOrd<T>,
 {
     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
         self.partial_cmp(*other)
diff --git a/src/lib.rs b/src/lib.rs
index fb5c506e8..3a0c2c106 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -160,7 +160,7 @@ impl std::error::Error for TryGetError {}
 #[cfg(feature = "std")]
 impl From<TryGetError> for std::io::Error {
     fn from(error: TryGetError) -> Self {
-        std::io::Error::new(std::io::ErrorKind::Other, error)
+        Self::new(std::io::ErrorKind::Other, error)
     }
 }
diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs
index ec9a60e6c..f00683b97 100644
--- a/tests/test_bytes.rs
+++ b/tests/test_bytes.rs
@@ -81,16 +81,16 @@ fn fmt() {
 #[test]
 fn fmt_write() {
     use std::fmt::Write;
-    let s = String::from_iter((0..10).map(|_| "abcdefg"));
+    let s: String = (0..10).map(|_| "abcdefg").collect();
 
     let mut a = BytesMut::with_capacity(64);
     write!(a, "{}", &s[..64]).unwrap();
-    assert_eq!(a, s[..64].as_bytes());
+    assert_eq!(a, &s.as_bytes()[..64]);
 
     let mut b = BytesMut::with_capacity(64);
     write!(b, "{}", &s[..32]).unwrap();
     write!(b, "{}", &s[32..64]).unwrap();
-    assert_eq!(b, s[..64].as_bytes());
+    assert_eq!(b, &s.as_bytes()[..64]);
 
     let mut c = BytesMut::with_capacity(64);
     write!(c, "{}", s).unwrap();
@@ -159,6 +159,7 @@ fn slice_oob_2() {
 #[should_panic]
 fn slice_start_greater_than_end() {
     let a = Bytes::from(&b"hello world"[..]);
+    #[allow(clippy::reversed_empty_ranges)]
     a.slice(5..3);
 }
 
@@ -207,7 +208,7 @@ fn split_off_uninitialized() {
 fn split_off_to_loop() {
     let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
 
-    for i in 0..(s.len() + 1) {
+    for i in 0..=s.len() {
         {
             let mut bytes = Bytes::from(&s[..]);
             let off = bytes.split_off(i);
@@ -431,7 +432,7 @@ fn fns_defined_for_bytes_mut() {
     let _ = bytes.as_mut_ptr();
 
     // Iterator
-    let v: Vec<u8> = bytes.as_ref().iter().cloned().collect();
+    let v: Vec<u8> = bytes.as_ref().to_vec();
     assert_eq!(&v[..], bytes);
 }
 
@@ -1108,7 +1109,7 @@ fn test_bytes_into_vec() {
     eprintln!("4");
     let b2 = b1.clone();
-    eprintln!("{:#?}", (&*b1).as_ptr());
+    eprintln!("{:#?}", (*b1).as_ptr());
 
     // shared.is_unique() = False
     eprintln!("5");
@@ -1359,12 +1360,12 @@ fn test_bytesmut_from_bytes_promotable_even_arc_offset() {
 #[test]
 fn try_reclaim_empty() {
     let mut buf = BytesMut::new();
-    assert_eq!(false, buf.try_reclaim(6));
+    assert!(!buf.try_reclaim(6));
     buf.reserve(6);
-    assert_eq!(true, buf.try_reclaim(6));
+    assert!(buf.try_reclaim(6));
     let cap = buf.capacity();
     assert!(cap >= 6);
-    assert_eq!(false, buf.try_reclaim(cap + 1));
+    assert!(!buf.try_reclaim(cap + 1));
 
     let mut buf = BytesMut::new();
     buf.reserve(6);
@@ -1373,8 +1374,8 @@ fn try_reclaim_empty() {
     let mut split = buf.split();
     drop(buf);
     assert_eq!(0, split.capacity());
-    assert_eq!(true, split.try_reclaim(6));
-    assert_eq!(false, split.try_reclaim(cap + 1));
+    assert!(split.try_reclaim(6));
+    assert!(!split.try_reclaim(cap + 1));
 }
 
 #[test]
@@ -1382,17 +1383,17 @@ fn try_reclaim_vec() {
     let mut buf = BytesMut::with_capacity(6);
     buf.put_slice(b"abc");
     // Reclaiming a ludicrous amount of space should calmly return false
-    assert_eq!(false, buf.try_reclaim(usize::MAX));
+    assert!(!buf.try_reclaim(usize::MAX));
 
-    assert_eq!(false, buf.try_reclaim(6));
+    assert!(!buf.try_reclaim(6));
     buf.advance(2);
     assert_eq!(4, buf.capacity());
     // We can reclaim 5 bytes, because the byte in the buffer can be moved to the front. 6 bytes
     // cannot be reclaimed because there is already one byte stored
-    assert_eq!(false, buf.try_reclaim(6));
-    assert_eq!(true, buf.try_reclaim(5));
+    assert!(!buf.try_reclaim(6));
+    assert!(buf.try_reclaim(5));
     buf.advance(1);
-    assert_eq!(true, buf.try_reclaim(6));
+    assert!(buf.try_reclaim(6));
     assert_eq!(6, buf.capacity());
 }
 
@@ -1403,27 +1404,27 @@ fn try_reclaim_arc() {
     let x = buf.split().freeze();
     buf.put_slice(b"def");
     // Reclaiming a ludicrous amount of space should calmly return false
-    assert_eq!(false, buf.try_reclaim(usize::MAX));
+    assert!(!buf.try_reclaim(usize::MAX));
     let y = buf.split().freeze();
     let z = y.clone();
-    assert_eq!(false, buf.try_reclaim(6));
+    assert!(!buf.try_reclaim(6));
     drop(x);
     drop(z);
-    assert_eq!(false, buf.try_reclaim(6));
+    assert!(!buf.try_reclaim(6));
     drop(y);
-    assert_eq!(true, buf.try_reclaim(6));
+    assert!(buf.try_reclaim(6));
     assert_eq!(6, buf.capacity());
     assert_eq!(0, buf.len());
     buf.put_slice(b"abc");
     buf.put_slice(b"def");
     assert_eq!(6, buf.capacity());
     assert_eq!(6, buf.len());
-    assert_eq!(false, buf.try_reclaim(6));
+    assert!(!buf.try_reclaim(6));
     buf.advance(4);
-    assert_eq!(true, buf.try_reclaim(4));
+    assert!(buf.try_reclaim(4));
     buf.advance(2);
-    assert_eq!(true, buf.try_reclaim(6));
+    assert!(buf.try_reclaim(6));
 }
 
 #[test]
@@ -1531,7 +1532,7 @@ struct SharedAtomicCounter(Arc<AtomicUsize>);
 impl SharedAtomicCounter {
     pub fn new() -> Self {
-        SharedAtomicCounter(Arc::new(AtomicUsize::new(0)))
+        Self(Arc::new(AtomicUsize::new(0)))
     }
 
     pub fn increment(&self) {
@@ -1562,9 +1563,10 @@ impl<const L: usize> OwnedTester<L> {
 
 impl<const L: usize> AsRef<[u8]> for OwnedTester<L> {
     fn as_ref(&self) -> &[u8] {
-        if self.panic_as_ref {
-            panic!("test-triggered panic in `AsRef<[u8]> for OwnedTester`");
-        }
+        assert!(
+            !self.panic_as_ref,
+            "test-triggered panic in `AsRef<[u8]> for OwnedTester`"
+        );
        self.buf.as_slice()
    }
 }
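
Reviewer note (not part of the patch): the hunks above are mechanical fixes for a handful of clippy lints. The stand-alone sketch below is a hypothetical example, unrelated to the `bytes` codebase, that reproduces the before/after shape of the main lints this diff addresses: `use_self`, `semicolon_if_nothing_returned`, `range_plus_one`, `precedence`, and `bool_assert_comparison`. The `Counter` type and its methods are invented purely for illustration.

    // Hypothetical demonstration of the clippy lints addressed by this patch.
    // None of these items exist in `bytes`; they are illustrative only.

    struct Counter {
        n: u64,
    }

    impl Counter {
        // `clippy::use_self`: refer to the type as `Self` inside its own
        // impl block instead of repeating `Counter`.
        fn new() -> Self {
            Self { n: 0 }
        }

        // `clippy::semicolon_if_nothing_returned`: a unit-returning call in
        // tail position reads as a statement, so end it with a semicolon.
        fn bump(&mut self) {
            self.increment();
        }

        fn increment(&mut self) {
            self.n += 1;
        }

        fn is_empty(&self) -> bool {
            self.n == 0
        }
    }

    fn main() {
        let mut c = Counter::new();

        // `clippy::range_plus_one`: prefer the inclusive range `0..=3`
        // over the equivalent `0..(3 + 1)`.
        for _ in 0..=3 {
            c.bump();
        }

        // `clippy::precedence`: parenthesize `1 << (width - 1)` so the
        // shift and the subtraction bind the way a reader expects.
        let width = 4;
        let cap = 1u64 << (width - 1);
        assert_eq!(cap, 8);

        // `clippy::bool_assert_comparison`: assert the boolean directly
        // instead of writing `assert_eq!(false, c.is_empty())`.
        assert!(!c.is_empty());
    }

The same reasoning applies to the `assert!`-over-`if`/`panic!` rewrite in tests/test_bytes.rs (clippy's `manual_assert`): the assertion form states the invariant positively and keeps the panic message attached to it.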