diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 512906d767f840..d0ae127ed8ced5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -7,7 +7,7 @@ on: jobs: ci: runs-on: ubuntu-20.04 - container: ghcr.io/rust-for-linux/ci:Rust-1.66.0 + container: ghcr.io/rust-for-linux/ci:Rust-1.70.0 timeout-minutes: 25 strategy: diff --git a/.github/workflows/kernel-arm-debug.config b/.github/workflows/kernel-arm-debug.config index c91d8bdb3c63ef..76a999e7daebad 100644 --- a/.github/workflows/kernel-arm-debug.config +++ b/.github/workflows/kernel-arm-debug.config @@ -1817,7 +1817,7 @@ CONFIG_CC_HAS_SANCOV_TRACE_PC=y # # Rust hacking # -CONFIG_RUST_DEBUG_ASSERTIONS=y +# CONFIG_RUST_DEBUG_ASSERTIONS is not set CONFIG_RUST_OVERFLOW_CHECKS=y # CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C is not set # CONFIG_RUST_OPT_LEVEL_0 is not set diff --git a/.github/workflows/kernel-arm64-debug-thinlto.config b/.github/workflows/kernel-arm64-debug-thinlto.config index 685c8e0982efe1..a1f963368297c6 100644 --- a/.github/workflows/kernel-arm64-debug-thinlto.config +++ b/.github/workflows/kernel-arm64-debug-thinlto.config @@ -1465,7 +1465,7 @@ CONFIG_CC_HAS_SANCOV_TRACE_PC=y # # Rust hacking # -CONFIG_RUST_DEBUG_ASSERTIONS=y +# CONFIG_RUST_DEBUG_ASSERTIONS is not set CONFIG_RUST_OVERFLOW_CHECKS=y # CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C is not set # CONFIG_RUST_OPT_LEVEL_0 is not set diff --git a/.github/workflows/kernel-arm64-debug.config b/.github/workflows/kernel-arm64-debug.config index b20218794e90da..f3c75e485e075f 100644 --- a/.github/workflows/kernel-arm64-debug.config +++ b/.github/workflows/kernel-arm64-debug.config @@ -1460,7 +1460,7 @@ CONFIG_CC_HAS_SANCOV_TRACE_PC=y # # Rust hacking # -CONFIG_RUST_DEBUG_ASSERTIONS=y +# CONFIG_RUST_DEBUG_ASSERTIONS is not set CONFIG_RUST_OVERFLOW_CHECKS=y # CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C is not set # CONFIG_RUST_OPT_LEVEL_0 is not set diff --git a/.github/workflows/kernel-ppc64le-debug.config b/.github/workflows/kernel-ppc64le-debug.config index 163173416726b2..039b53bf470119 100644 --- a/.github/workflows/kernel-ppc64le-debug.config +++ b/.github/workflows/kernel-ppc64le-debug.config @@ -1582,7 +1582,7 @@ CONFIG_RUNTIME_TESTING_MENU=y # # Rust hacking # -CONFIG_RUST_DEBUG_ASSERTIONS=y +# CONFIG_RUST_DEBUG_ASSERTIONS is not set CONFIG_RUST_OVERFLOW_CHECKS=y # CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C is not set CONFIG_RUST_OPT_LEVEL_0=y diff --git a/.github/workflows/kernel-riscv64-debug.config b/.github/workflows/kernel-riscv64-debug.config index 0c39089a35ef68..046d8758f9597d 100644 --- a/.github/workflows/kernel-riscv64-debug.config +++ b/.github/workflows/kernel-riscv64-debug.config @@ -1310,7 +1310,7 @@ CONFIG_CC_HAS_SANCOV_TRACE_PC=y # # Rust hacking # -CONFIG_RUST_DEBUG_ASSERTIONS=y +# CONFIG_RUST_DEBUG_ASSERTIONS is not set CONFIG_RUST_OVERFLOW_CHECKS=y # CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C is not set CONFIG_RUST_OPT_LEVEL_0=y diff --git a/.github/workflows/kernel-x86_64-debug-thinlto.config b/.github/workflows/kernel-x86_64-debug-thinlto.config index a4db37edef2a48..2a06e662506c06 100644 --- a/.github/workflows/kernel-x86_64-debug-thinlto.config +++ b/.github/workflows/kernel-x86_64-debug-thinlto.config @@ -1533,7 +1533,7 @@ CONFIG_RUNTIME_TESTING_MENU=y # # Rust hacking # -CONFIG_RUST_DEBUG_ASSERTIONS=y +# CONFIG_RUST_DEBUG_ASSERTIONS is not set CONFIG_RUST_OVERFLOW_CHECKS=y # CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C is not set CONFIG_RUST_OPT_LEVEL_0=y diff --git 
a/.github/workflows/kernel-x86_64-debug.config b/.github/workflows/kernel-x86_64-debug.config index 635eecc7ec72d6..aa113d97674a9b 100644 --- a/.github/workflows/kernel-x86_64-debug.config +++ b/.github/workflows/kernel-x86_64-debug.config @@ -1536,7 +1536,7 @@ CONFIG_RUNTIME_TESTING_MENU=y # # Rust hacking # -CONFIG_RUST_DEBUG_ASSERTIONS=y +# CONFIG_RUST_DEBUG_ASSERTIONS is not set CONFIG_RUST_OVERFLOW_CHECKS=y # CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C is not set CONFIG_RUST_OPT_LEVEL_0=y diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 68596479847931..849b36f2069ba0 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -31,7 +31,7 @@ you probably needn't concern yourself with pcmciautils. ====================== =============== ======================================== GNU C 5.1 gcc --version Clang/LLVM (optional) 11.0.0 clang --version -Rust (optional) 1.66.0 rustc --version +Rust (optional) 1.70.0 rustc --version bindgen (optional) 0.56.0 bindgen --version GNU make 3.82 make --version bash 4.2 bash --version diff --git a/Makefile b/Makefile index f5543eef4f8227..c33d8533c46d35 100644 --- a/Makefile +++ b/Makefile @@ -466,6 +466,7 @@ export rust_common_flags := --edition=2021 \ -Dclippy::let_unit_value -Dclippy::mut_mut \ -Dclippy::needless_bitwise_bool \ -Dclippy::needless_continue \ + -Dclippy::no_mangle_with_rust_abi \ -Wclippy::dbg_macro KBUILD_HOSTCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS) diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs index 0142178370e9af..5b2517225fc9f5 100644 --- a/rust/alloc/alloc.rs +++ b/rust/alloc/alloc.rs @@ -16,34 +16,28 @@ use core::ptr::{self, NonNull}; #[doc(inline)] pub use core::alloc::*; -use core::marker::Destruct; - #[cfg(test)] mod tests; extern "Rust" { - // These are the magic symbols to call the global allocator. rustc generates + // These are the magic symbols to call the global allocator. rustc generates // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute // (the code expanding that attribute macro generates those functions), or to call - // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`) + // the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`) // otherwise. // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them like `malloc`, `realloc`, and `free`, respectively. 
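// For example, a crate-level global allocator declaration (a minimal sketch;
// `KernelAllocator` here stands in for any type implementing `GlobalAlloc`)
// is what makes rustc generate the `__rg_*` functions behind these symbols:
//
//     #[global_allocator]
//     static ALLOCATOR: KernelAllocator = KernelAllocator;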
#[rustc_allocator] - #[cfg_attr(not(bootstrap), rustc_nounwind)] - #[cfg_attr(bootstrap, rustc_allocator_nounwind)] + #[rustc_nounwind] fn __rust_alloc(size: usize, align: usize) -> *mut u8; #[rustc_deallocator] - #[cfg_attr(not(bootstrap), rustc_nounwind)] - #[cfg_attr(bootstrap, rustc_allocator_nounwind)] + #[rustc_nounwind] fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize); #[rustc_reallocator] - #[cfg_attr(not(bootstrap), rustc_nounwind)] - #[cfg_attr(bootstrap, rustc_allocator_nounwind)] + #[rustc_nounwind] fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8; #[rustc_allocator_zeroed] - #[cfg_attr(not(bootstrap), rustc_nounwind)] - #[cfg_attr(bootstrap, rustc_allocator_nounwind)] + #[rustc_nounwind] fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8; } @@ -337,16 +331,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { #[cfg_attr(not(test), lang = "box_free")] #[inline] -#[rustc_const_unstable(feature = "const_box", issue = "92521")] // This signature has to be the same as `Box`, otherwise an ICE will happen. // When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as // well. // For example if `Box` is changed to `struct Box(Unique, A)`, // this function has to be changed to `fn box_free(Unique, A)` as well. -pub(crate) const unsafe fn box_free( - ptr: Unique, - alloc: A, -) { +pub(crate) unsafe fn box_free(ptr: Unique, alloc: A) { unsafe { let size = size_of_val(ptr.as_ref()); let align = min_align_of_val(ptr.as_ref()); @@ -359,7 +349,7 @@ pub(crate) const unsafe fn box_free !; @@ -404,25 +394,24 @@ pub use std::alloc::handle_alloc_error; #[allow(unused_attributes)] #[unstable(feature = "alloc_internals", issue = "none")] pub mod __alloc_error_handler { - use crate::alloc::Layout; - - // called via generated `__rust_alloc_error_handler` - - // if there is no `#[alloc_error_handler]` + // called via generated `__rust_alloc_error_handler` if there is no + // `#[alloc_error_handler]`. #[rustc_std_internal_symbol] pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! { - panic!("memory allocation of {size} bytes failed") - } - - // if there is an `#[alloc_error_handler]` - #[rustc_std_internal_symbol] - pub unsafe fn __rg_oom(size: usize, align: usize) -> ! { - let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; extern "Rust" { - #[lang = "oom"] - fn oom_impl(layout: Layout) -> !; + // This symbol is emitted by rustc next to __rust_alloc_error_handler. + // Its value depends on the -Zoom={panic,abort} compiler option. 
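// (With `-Zoom=panic` rustc emits this static as non-zero, selecting the
// unwinding `panic!` branch below; with the default `-Zoom=abort` it is
// zero and the non-unwinding branch is taken instead.)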
+ static __rust_alloc_error_handler_should_panic: u8; + } + + #[allow(unused_unsafe)] + if unsafe { __rust_alloc_error_handler_should_panic != 0 } { + panic!("memory allocation of {size} bytes failed") + } else { + core::panicking::panic_nounwind_fmt(format_args!( + "memory allocation of {size} bytes failed" + )) } - unsafe { oom_impl(layout) } } } diff --git a/rust/alloc/borrow.rs b/rust/alloc/borrow.rs index be72b51798beac..bdf29cf790827a 100644 --- a/rust/alloc/borrow.rs +++ b/rust/alloc/borrow.rs @@ -330,10 +330,9 @@ impl Cow<'_, B> { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_const_unstable(feature = "const_deref", issue = "88955")] -impl const Deref for Cow<'_, B> +impl Deref for Cow<'_, B> where - B::Owned: ~const Borrow, + B::Owned: Borrow, { type Target = B; diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs index 7875adbab839d9..0186dab33a3748 100644 --- a/rust/alloc/boxed.rs +++ b/rust/alloc/boxed.rs @@ -152,15 +152,13 @@ use core::any::Any; use core::async_iter::AsyncIterator; use core::borrow; use core::cmp::Ordering; -use core::convert::{From, TryFrom}; use core::error::Error; use core::fmt; use core::future::Future; use core::hash::{Hash, Hasher}; -#[cfg(not(no_global_oom_handling))] -use core::iter::FromIterator; -use core::iter::{FusedIterator, Iterator}; -use core::marker::{Destruct, Unpin, Unsize}; +use core::iter::FusedIterator; +use core::marker::Tuple; +use core::marker::Unsize; use core::mem; use core::ops::{ CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver, @@ -187,7 +185,7 @@ pub use thin::ThinBox; mod thin; -/// A pointer type for heap allocation. +/// A pointer type that uniquely owns a heap allocation of type `T`. /// /// See the [module-level documentation](../../std/boxed/index.html) for more. 
#[lang = "owned_box"] @@ -215,6 +213,7 @@ impl Box { #[inline(always)] #[stable(feature = "rust1", since = "1.0.0")] #[must_use] + #[rustc_diagnostic_item = "box_new"] pub fn new(x: T) -> Self { #[rustc_box] Box::new(x) @@ -284,9 +283,7 @@ impl Box { #[must_use] #[inline(always)] pub fn pin(x: T) -> Pin> { - (#[rustc_box] - Box::new(x)) - .into() + Box::new(x).into() } /// Allocates memory on the heap then places `x` into it, @@ -378,12 +375,11 @@ impl Box { /// ``` #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] #[must_use] #[inline] - pub const fn new_in(x: T, alloc: A) -> Self + pub fn new_in(x: T, alloc: A) -> Self where - A: ~const Allocator + ~const Destruct, + A: Allocator, { let mut boxed = Self::new_uninit_in(alloc); unsafe { @@ -408,12 +404,10 @@ impl Box { /// # Ok::<(), std::alloc::AllocError>(()) /// ``` #[unstable(feature = "allocator_api", issue = "32838")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] #[inline] - pub const fn try_new_in(x: T, alloc: A) -> Result + pub fn try_new_in(x: T, alloc: A) -> Result where - T: ~const Destruct, - A: ~const Allocator + ~const Destruct, + A: Allocator, { let mut boxed = Self::try_new_uninit_in(alloc)?; unsafe { @@ -443,13 +437,12 @@ impl Box { /// assert_eq!(*five, 5) /// ``` #[unstable(feature = "allocator_api", issue = "32838")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] #[cfg(not(no_global_oom_handling))] #[must_use] // #[unstable(feature = "new_uninit", issue = "63291")] - pub const fn new_uninit_in(alloc: A) -> Box, A> + pub fn new_uninit_in(alloc: A) -> Box, A> where - A: ~const Allocator + ~const Destruct, + A: Allocator, { let layout = Layout::new::>(); // NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable. @@ -484,10 +477,9 @@ impl Box { /// ``` #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] - pub const fn try_new_uninit_in(alloc: A) -> Result, A>, AllocError> + pub fn try_new_uninit_in(alloc: A) -> Result, A>, AllocError> where - A: ~const Allocator + ~const Destruct, + A: Allocator, { let layout = Layout::new::>(); let ptr = alloc.allocate(layout)?.cast(); @@ -515,13 +507,12 @@ impl Box { /// /// [zeroed]: mem::MaybeUninit::zeroed #[unstable(feature = "allocator_api", issue = "32838")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] #[cfg(not(no_global_oom_handling))] // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] - pub const fn new_zeroed_in(alloc: A) -> Box, A> + pub fn new_zeroed_in(alloc: A) -> Box, A> where - A: ~const Allocator + ~const Destruct, + A: Allocator, { let layout = Layout::new::>(); // NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable. 
@@ -556,10 +547,9 @@ impl Box { /// [zeroed]: mem::MaybeUninit::zeroed #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] - pub const fn try_new_zeroed_in(alloc: A) -> Result, A>, AllocError> + pub fn try_new_zeroed_in(alloc: A) -> Result, A>, AllocError> where - A: ~const Allocator + ~const Destruct, + A: Allocator, { let layout = Layout::new::>(); let ptr = alloc.allocate_zeroed(layout)?.cast(); @@ -575,12 +565,11 @@ impl Box { /// construct a (pinned) `Box` in a different way than with [`Box::new_in`]. #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] #[must_use] #[inline(always)] - pub const fn pin_in(x: T, alloc: A) -> Pin + pub fn pin_in(x: T, alloc: A) -> Pin where - A: 'static + ~const Allocator + ~const Destruct, + A: 'static + Allocator, { Self::into_pin(Self::new_in(x, alloc)) } @@ -607,12 +596,8 @@ impl Box { /// assert_eq!(Box::into_inner(c), 5); /// ``` #[unstable(feature = "box_into_inner", issue = "80437")] - #[rustc_const_unstable(feature = "const_box", issue = "92521")] #[inline] - pub const fn into_inner(boxed: Self) -> T - where - Self: ~const Destruct, - { + pub fn into_inner(boxed: Self) -> T { *boxed } } @@ -954,7 +939,7 @@ impl Box { /// [`Layout`]: crate::Layout #[stable(feature = "box_raw", since = "1.4.0")] #[inline] - #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `Box`"] + #[must_use = "call `drop(Box::from_raw(ptr))` if you intend to drop the `Box`"] pub unsafe fn from_raw(raw: *mut T) -> Self { unsafe { Self::from_raw_in(raw, Global) } } @@ -1243,8 +1228,8 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Box { /// Creates a `Box`, with the `Default` value for T. + #[inline] fn default() -> Self { - #[rustc_box] Box::new(T::default()) } } @@ -1253,6 +1238,7 @@ impl Default for Box { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] impl const Default for Box<[T]> { + #[inline] fn default() -> Self { let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling(); Box(ptr, Global) @@ -1263,6 +1249,7 @@ impl const Default for Box<[T]> { #[stable(feature = "default_box_extra", since = "1.17.0")] #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] impl const Default for Box { + #[inline] fn default() -> Self { // SAFETY: This is the same as `Unique::cast` but with an unsized `U = str`. 
let ptr: Unique = unsafe { @@ -1617,7 +1604,6 @@ impl From<[T; N]> for Box<[T]> { /// println!("{boxed:?}"); /// ``` fn from(array: [T; N]) -> Box<[T]> { - #[rustc_box] Box::new(array) } } @@ -1982,7 +1968,7 @@ impl ExactSizeIterator for Box FusedIterator for Box {} #[stable(feature = "boxed_closure_impls", since = "1.35.0")] -impl + ?Sized, A: Allocator> FnOnce for Box { +impl + ?Sized, A: Allocator> FnOnce for Box { type Output = >::Output; extern "rust-call" fn call_once(self, args: Args) -> Self::Output { @@ -1991,20 +1977,20 @@ impl + ?Sized, A: Allocator> FnOnce for Box { } #[stable(feature = "boxed_closure_impls", since = "1.35.0")] -impl + ?Sized, A: Allocator> FnMut for Box { +impl + ?Sized, A: Allocator> FnMut for Box { extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output { >::call_mut(self, args) } } #[stable(feature = "boxed_closure_impls", since = "1.35.0")] -impl + ?Sized, A: Allocator> Fn for Box { +impl + ?Sized, A: Allocator> Fn for Box { extern "rust-call" fn call(&self, args: Args) -> Self::Output { >::call(self, args) } } -#[unstable(feature = "coerce_unsized", issue = "27732")] +#[unstable(feature = "coerce_unsized", issue = "18598")] impl, U: ?Sized, A: Allocator> CoerceUnsized> for Box {} #[unstable(feature = "dispatch_from_dyn", issue = "none")] diff --git a/rust/alloc/boxed/thin.rs b/rust/alloc/boxed/thin.rs index 696ab6b6f0f11b..3aebf3ea94525e 100644 --- a/rust/alloc/boxed/thin.rs +++ b/rust/alloc/boxed/thin.rs @@ -50,7 +50,7 @@ unsafe impl Sync for ThinBox {} #[unstable(feature = "thin_box", issue = "92791")] impl ThinBox { - /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on + /// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on /// the stack. /// /// # Examples @@ -61,6 +61,8 @@ impl ThinBox { /// /// let five = ThinBox::new(5); /// ``` + /// + /// [`Metadata`]: core::ptr::Pointee::Metadata #[cfg(not(no_global_oom_handling))] pub fn new(value: T) -> Self { let meta = ptr::metadata(&value); @@ -71,7 +73,7 @@ impl ThinBox { #[unstable(feature = "thin_box", issue = "92791")] impl ThinBox { - /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on + /// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on /// the stack. /// /// # Examples @@ -82,6 +84,8 @@ impl ThinBox { /// /// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]); /// ``` + /// + /// [`Metadata`]: core::ptr::Pointee::Metadata #[cfg(not(no_global_oom_handling))] pub fn new_unsize(value: T) -> Self where @@ -228,24 +232,45 @@ impl WithHeader { // - Assumes that either `value` can be dereferenced, or is the // `NonNull::dangling()` we use when both `T` and `H` are ZSTs. unsafe fn drop(&self, value: *mut T) { + struct DropGuard { + ptr: NonNull, + value_layout: Layout, + _marker: PhantomData, + } + + impl Drop for DropGuard { + fn drop(&mut self) { + unsafe { + // SAFETY: Layout must have been computable if we're in drop + let (layout, value_offset) = + WithHeader::::alloc_layout(self.value_layout).unwrap_unchecked(); + + // Note: Don't deallocate if the layout size is zero, because the pointer + // didn't come from the allocator. 
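// (For instance, `ThinBox<()>` never touches the allocator: both the header
// and the value are zero-sized, so the pointer is `NonNull::dangling()` and
// must not be passed to `dealloc`; the `else` branch below merely asserts
// that invariant.)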
+ if layout.size() != 0 { + alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout); + } else { + debug_assert!( + value_offset == 0 + && mem::size_of::() == 0 + && self.value_layout.size() == 0 + ); + } + } + } + } + unsafe { - let value_layout = Layout::for_value_raw(value); - // SAFETY: Layout must have been computable if we're in drop - let (layout, value_offset) = Self::alloc_layout(value_layout).unwrap_unchecked(); + // `_guard` will deallocate the memory when dropped, even if `drop_in_place` unwinds. + let _guard = DropGuard { + ptr: self.0, + value_layout: Layout::for_value_raw(value), + _marker: PhantomData::, + }; // We only drop the value because the Pointee trait requires that the metadata is copy // aka trivially droppable. ptr::drop_in_place::(value); - - // Note: Don't deallocate if the layout size is zero, because the pointer - // didn't come from the allocator. - if layout.size() != 0 { - alloc::dealloc(self.0.as_ptr().sub(value_offset), layout); - } else { - debug_assert!( - value_offset == 0 && mem::size_of::() == 0 && value_layout.size() == 0 - ); - } } } diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs index da61544124168c..2506065d158a95 100644 --- a/rust/alloc/collections/mod.rs +++ b/rust/alloc/collections/mod.rs @@ -141,7 +141,7 @@ impl Display for TryReserveError { " because the computed capacity exceeded the collection's maximum" } TryReserveErrorKind::AllocError { .. } => { - " because the memory allocator returned a error" + " because the memory allocator returned an error" } }; fmt.write_str(reason) diff --git a/rust/alloc/ffi/c_str.rs b/rust/alloc/ffi/c_str.rs index aeb86b2f162f01..8f572cb5196566 100644 --- a/rust/alloc/ffi/c_str.rs +++ b/rust/alloc/ffi/c_str.rs @@ -993,12 +993,6 @@ impl IntoStringError { pub fn utf8_error(&self) -> Utf8Error { self.error } - - #[doc(hidden)] - #[unstable(feature = "cstr_internals", issue = "none")] - pub fn __source(&self) -> &Utf8Error { - &self.error - } } impl IntoStringError { @@ -1143,6 +1137,6 @@ impl core::error::Error for IntoStringError { } fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - Some(self.__source()) + Some(&self.error) } } diff --git a/rust/alloc/fmt.rs b/rust/alloc/fmt.rs index 7faa3e1476464b..4594140df595b1 100644 --- a/rust/alloc/fmt.rs +++ b/rust/alloc/fmt.rs @@ -421,7 +421,7 @@ //! // documentation for details, and the function `pad` can be used //! // to pad strings. //! let decimals = f.precision().unwrap_or(3); -//! let string = format!("{:.*}", decimals, magnitude); +//! let string = format!("{magnitude:.decimals$}"); //! f.pad_integral(true, "", &string) //! } //! } @@ -520,7 +520,7 @@ //! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro")); //! //! fn my_fmt_fn(args: fmt::Arguments) { -//! write!(&mut io::stdout(), "{}", args); +//! write!(&mut io::stdout(), "{args}"); //! } //! my_fmt_fn(format_args!(", or a {} too", "function")); //! ``` @@ -560,7 +560,7 @@ pub use core::fmt::Alignment; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::Error; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{write, ArgumentV1, Arguments}; +pub use core::fmt::{write, Arguments}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{Binary, Octal}; #[stable(feature = "rust1", since = "1.0.0")] diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs index d361238272db4f..2f462a5216cfa0 100644 --- a/rust/alloc/lib.rs +++ b/rust/alloc/lib.rs @@ -5,7 +5,7 @@ //! 
This library provides smart pointers and collections for managing //! heap-allocated values. //! -//! This library, like libcore, normally doesn’t need to be used directly +//! This library, like core, normally doesn’t need to be used directly //! since its contents are re-exported in the [`std` crate](../std/index.html). //! Crates that use the `#![no_std]` attribute however will typically //! not depend on `std`, so they’d use this crate instead. @@ -77,19 +77,26 @@ ))] #![no_std] #![needs_allocator] -// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be +// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be // able to "empty" this crate. See . // rustc itself never sets the feature, so this line has no effect there. #![cfg(any(not(feature = "miri-test-libstd"), test, doctest))] // // Lints: #![deny(unsafe_op_in_unsafe_fn)] +#![deny(fuzzy_provenance_casts)] #![warn(deprecated_in_future)] #![warn(missing_debug_implementations)] #![warn(missing_docs)] #![allow(explicit_outlives_requirements)] +#![warn(multiple_supertrait_upcastable)] // // Library features: +// tidy-alphabetical-start +#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))] +#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))] +#![cfg_attr(test, feature(is_sorted))] +#![cfg_attr(test, feature(new_uninit))] #![feature(alloc_layout_extra)] #![feature(allocator_api)] #![feature(array_chunks)] @@ -99,22 +106,21 @@ #![feature(assert_matches)] #![feature(async_iterator)] #![feature(coerce_unsized)] -#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))] +#![feature(const_align_of_val)] #![feature(const_box)] -#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))] -#![feature(const_cow_is_borrowed)] #![feature(const_convert)] -#![feature(const_size_of_val)] -#![feature(const_align_of_val)] -#![feature(const_ptr_read)] -#![feature(const_maybe_uninit_write)] -#![feature(const_maybe_uninit_as_mut_ptr)] -#![feature(const_refs_to_cell)] -#![feature(core_intrinsics)] +#![feature(const_cow_is_borrowed)] #![feature(const_eval_select)] +#![feature(const_maybe_uninit_as_mut_ptr)] +#![feature(const_maybe_uninit_write)] +#![feature(const_maybe_uninit_zeroed)] #![feature(const_pin)] +#![feature(const_ptr_read)] +#![feature(const_refs_to_cell)] +#![feature(const_size_of_val)] #![feature(const_waker)] -#![feature(cstr_from_bytes_until_nul)] +#![feature(core_intrinsics)] +#![feature(core_panic)] #![feature(dispatch_from_dyn)] #![feature(error_generic_member_access)] #![feature(error_in_core)] @@ -123,15 +129,15 @@ #![feature(fmt_internals)] #![feature(fn_traits)] #![feature(hasher_prefixfree_extras)] +#![feature(inline_const)] #![feature(inplace_iteration)] #![feature(iter_advance_by)] #![feature(iter_next_chunk)] +#![feature(iter_repeat_n)] #![feature(layout_for_ptr)] #![feature(maybe_uninit_slice)] #![feature(maybe_uninit_uninit_array)] #![feature(maybe_uninit_uninit_array_transpose)] -#![cfg_attr(test, feature(new_uninit))] -#![feature(nonnull_slice_from_raw_parts)] #![feature(pattern)] #![feature(pointer_byte_offsets)] #![feature(provide_any)] @@ -147,48 +153,54 @@ #![feature(slice_ptr_get)] #![feature(slice_ptr_len)] #![feature(slice_range)] +#![feature(std_internals)] #![feature(str_internals)] #![feature(strict_provenance)] #![feature(trusted_len)] #![feature(trusted_random_access)] #![feature(try_trait_v2)] +#![feature(tuple_trait)] #![feature(unchecked_math)] #![feature(unicode_internals)] 
#![feature(unsize)] #![feature(utf8_chunks)] -#![feature(std_internals)] +// tidy-alphabetical-end // // Language features: +// tidy-alphabetical-start +#![cfg_attr(not(test), feature(generator_trait))] +#![cfg_attr(test, feature(panic_update_hook))] +#![cfg_attr(test, feature(test))] #![feature(allocator_internals)] #![feature(allow_internal_unstable)] #![feature(associated_type_bounds)] +#![feature(c_unwind)] #![feature(cfg_sanitize)] #![feature(const_deref)] #![feature(const_mut_refs)] -#![feature(const_ptr_write)] #![feature(const_precise_live_drops)] +#![feature(const_ptr_write)] #![feature(const_trait_impl)] #![feature(const_try)] #![feature(dropck_eyepatch)] #![feature(exclusive_range_pattern)] #![feature(fundamental)] -#![cfg_attr(not(test), feature(generator_trait))] #![feature(hashmap_internals)] #![feature(lang_items)] #![feature(min_specialization)] +#![feature(multiple_supertrait_upcastable)] #![feature(negative_impls)] #![feature(never_type)] +#![feature(pointer_is_aligned)] #![feature(rustc_allow_const_fn_unstable)] #![feature(rustc_attrs)] -#![feature(pointer_is_aligned)] #![feature(slice_internals)] #![feature(staged_api)] #![feature(stmt_expr_attributes)] -#![cfg_attr(test, feature(test))] #![feature(unboxed_closures)] #![feature(unsized_fn_params)] -#![feature(c_unwind)] #![feature(with_negative_coherence)] +// tidy-alphabetical-end // // Rustdoc features: #![feature(doc_cfg)] @@ -205,6 +217,8 @@ extern crate std; #[cfg(test)] extern crate test; +#[cfg(test)] +mod testing; // Module with internal macros used by other modules (needs to be included before other modules). #[macro_use] @@ -250,3 +264,20 @@ pub mod vec; pub mod __export { pub use core::format_args; } + +#[cfg(test)] +#[allow(dead_code)] // Not used in all configurations +pub(crate) mod test_helpers { + /// Copied from `std::test_helpers::test_rng`, since these tests rely on the + /// seed not being the same for every RNG invocation too. + pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng { + use std::hash::{BuildHasher, Hash, Hasher}; + let mut hasher = std::collections::hash_map::RandomState::new().build_hasher(); + std::panic::Location::caller().hash(&mut hasher); + let hc64 = hasher.finish(); + let seed_vec = + hc64.to_le_bytes().into_iter().chain(0u8..8).collect::>(); + let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap(); + rand::SeedableRng::from_seed(seed) + } +} diff --git a/rust/alloc/macros.rs b/rust/alloc/macros.rs index 97c330b5eff95c..584bb838273180 100644 --- a/rust/alloc/macros.rs +++ b/rust/alloc/macros.rs @@ -50,6 +50,8 @@ macro_rules! vec { ); ($($x:expr),+ $(,)?) => ( $crate::__rust_force_expr!(<[_]>::into_vec( + // This rustc_box is not required, but it produces a dramatic improvement in compile + // time when constructing arrays with many elements. #[rustc_box] $crate::boxed::Box::new([$($x),+]) )) diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs index 08481e5d7f8961..1531c9fbc71da1 100644 --- a/rust/alloc/raw_vec.rs +++ b/rust/alloc/raw_vec.rs @@ -6,7 +6,6 @@ use core::alloc::LayoutError; use core::cmp; use core::intrinsics; use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; -use core::ops::Drop; use core::ptr::{self, NonNull, Unique}; use core::slice; @@ -274,10 +273,15 @@ impl RawVec { if T::IS_ZST || self.cap == 0 { None } else { - // We have an allocated chunk of memory, so we can bypass runtime - // checks to get our current layout. 
+ // We could use Layout::array here which ensures the absence of isize and usize overflows + // and could hypothetically handle differences between stride and size, but this memory + // has already been allocated so we know it can't overflow and currently rust does not + // support such types. So we can do better by skipping some checks and avoid an unwrap. + let _: () = const { assert!(mem::size_of::() % mem::align_of::() == 0) }; unsafe { - let layout = Layout::array::(self.cap).unwrap_unchecked(); + let align = mem::align_of::(); + let size = mem::size_of::().unchecked_mul(self.cap); + let layout = Layout::from_size_align_unchecked(size, align); Some((self.ptr.cast().into(), layout)) } } @@ -474,11 +478,13 @@ impl RawVec { assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; - + // See current_memory() why this assert is here + let _: () = const { assert!(mem::size_of::() % mem::align_of::() == 0) }; let ptr = unsafe { // `Layout::array` cannot overflow here because it would have // overflowed earlier when capacity was larger. - let new_layout = Layout::array::(cap).unwrap_unchecked(); + let new_size = mem::size_of::().unchecked_mul(cap); + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); self.alloc .shrink(ptr, layout, new_layout) .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs index 3dfc1e7ce7f940..6ac463bd3edc53 100644 --- a/rust/alloc/slice.rs +++ b/rust/alloc/slice.rs @@ -21,15 +21,20 @@ use core::cmp::Ordering::{self, Less}; use core::mem::{self, SizedTypeProperties}; #[cfg(not(no_global_oom_handling))] use core::ptr; +#[cfg(not(no_global_oom_handling))] +use core::slice::sort; use crate::alloc::Allocator; -use crate::alloc::Global; +#[cfg(not(no_global_oom_handling))] +use crate::alloc::{self, Global}; #[cfg(not(no_global_oom_handling))] use crate::borrow::ToOwned; use crate::boxed::Box; -use crate::collections::TryReserveError; use crate::vec::Vec; +#[cfg(test)] +mod tests; + #[unstable(feature = "slice_range", issue = "76393")] pub use core::slice::range; #[unstable(feature = "array_chunks", issue = "74985")] @@ -89,7 +94,6 @@ pub(crate) mod hack { use core::alloc::Allocator; use crate::boxed::Box; - use crate::collections::TryReserveError; use crate::vec::Vec; // We shouldn't add inline attribute to this since this is used in @@ -109,11 +113,6 @@ pub(crate) mod hack { T::to_vec(s, alloc) } - #[inline] - pub fn try_to_vec(s: &[T], alloc: A) -> Result, TryReserveError> { - T::try_to_vec(s, alloc) - } - #[cfg(not(no_global_oom_handling))] pub trait ConvertVec { fn to_vec(s: &[Self], alloc: A) -> Vec @@ -121,12 +120,6 @@ pub(crate) mod hack { Self: Sized; } - pub trait TryConvertVec { - fn try_to_vec(s: &[Self], alloc: A) -> Result, TryReserveError> - where - Self: Sized; - } - #[cfg(not(no_global_oom_handling))] impl ConvertVec for T { #[inline] @@ -179,42 +172,6 @@ pub(crate) mod hack { v } } - - impl TryConvertVec for T { - #[inline] - default fn try_to_vec(s: &[Self], alloc: A) -> Result, TryReserveError> { - struct DropGuard<'a, T, A: Allocator> { - vec: &'a mut Vec, - num_init: usize, - } - impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> { - #[inline] - fn drop(&mut self) { - // SAFETY: - // items were marked initialized in the loop below - unsafe { - self.vec.set_len(self.num_init); - } - } - } - let mut vec = Vec::try_with_capacity_in(s.len(), 
alloc)?; - let mut guard = DropGuard { vec: &mut vec, num_init: 0 }; - let slots = guard.vec.spare_capacity_mut(); - // .take(slots.len()) is necessary for LLVM to remove bounds checks - // and has better codegen than zip. - for (i, b) in s.iter().enumerate().take(slots.len()) { - guard.num_init = i; - slots[i].write(b.clone()); - } - core::mem::forget(guard); - // SAFETY: - // the vec was allocated and initialized above to at least this length. - unsafe { - vec.set_len(s.len()); - } - Ok(vec) - } - } } #[cfg(not(test))] @@ -253,7 +210,7 @@ impl [T] { where T: Ord, { - merge_sort(self, T::lt); + stable_sort(self, T::lt); } /// Sorts the slice with a comparator function. @@ -309,7 +266,7 @@ impl [T] { where F: FnMut(&T, &T) -> Ordering, { - merge_sort(self, |a, b| compare(a, b) == Less); + stable_sort(self, |a, b| compare(a, b) == Less); } /// Sorts the slice with a key extraction function. @@ -352,7 +309,7 @@ impl [T] { F: FnMut(&T) -> K, K: Ord, { - merge_sort(self, |a, b| f(a).lt(&f(b))); + stable_sort(self, |a, b| f(a).lt(&f(b))); } /// Sorts the slice with a key extraction function. @@ -461,25 +418,6 @@ impl [T] { self.to_vec_in(Global) } - /// Tries to copy `self` into a new `Vec`. - /// - /// # Examples - /// - /// ``` - /// let s = [10, 40, 30]; - /// let x = s.try_to_vec().unwrap(); - /// // Here, `s` and `x` can be modified independently. - /// ``` - #[rustc_allow_incoherent_impl] - #[inline] - #[stable(feature = "kernel", since = "1.0.0")] - pub fn try_to_vec(&self) -> Result, TryReserveError> - where - T: Clone, - { - self.try_to_vec_in(Global) - } - /// Copies `self` into a new `Vec` with an allocator. /// /// # Examples @@ -505,30 +443,6 @@ impl [T] { hack::to_vec(self, alloc) } - /// Tries to copy `self` into a new `Vec` with an allocator. - /// - /// # Examples - /// - /// ``` - /// #![feature(allocator_api)] - /// - /// use std::alloc::System; - /// - /// let s = [10, 40, 30]; - /// let x = s.try_to_vec_in(System).unwrap(); - /// // Here, `s` and `x` can be modified independently. - /// ``` - #[rustc_allow_incoherent_impl] - #[inline] - #[stable(feature = "kernel", since = "1.0.0")] - pub fn try_to_vec_in(&self, alloc: A) -> Result, TryReserveError> - where - T: Clone, - { - // N.B., see the `hack` module in this file for more details. - hack::try_to_vec(self, alloc) - } - /// Converts `self` into a vector without clones or allocation. /// /// The resulting vector can be converted back into a box via @@ -551,7 +465,7 @@ impl [T] { hack::into_vec(self) } - /// Creates a vector by repeating a slice `n` times. + /// Creates a vector by copying a slice `n` times. /// /// # Panics /// @@ -746,7 +660,7 @@ impl [u8] { /// /// ```error /// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predica -/// --> src/liballoc/slice.rs:608:6 +/// --> library/alloc/src/slice.rs:608:6 /// | /// 608 | impl> Concat for [V] { /// | ^ unconstrained type parameter @@ -870,21 +784,17 @@ impl BorrowMut<[T]> for Vec { } } +// Specializable trait for implementing ToOwned::clone_into. This is +// public in the crate and has the Allocator parameter so that +// vec::clone_from can use it too. 
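// (Illustrative: the `Copy` specialization further below reduces `clone_into`
// to `target.clear()` plus `target.extend_from_slice(self)`, effectively a
// memcpy, while the default impl clones element-by-element into the reused
// buffer.)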
#[cfg(not(no_global_oom_handling))] -#[stable(feature = "rust1", since = "1.0.0")] -impl ToOwned for [T] { - type Owned = Vec; - #[cfg(not(test))] - fn to_owned(&self) -> Vec { - self.to_vec() - } - - #[cfg(test)] - fn to_owned(&self) -> Vec { - hack::to_vec(self, Global) - } +pub(crate) trait SpecCloneIntoVec { + fn clone_into(&self, target: &mut Vec); +} - fn clone_into(&self, target: &mut Vec) { +#[cfg(not(no_global_oom_handling))] +impl SpecCloneIntoVec for [T] { + default fn clone_into(&self, target: &mut Vec) { // drop anything in target that will not be overwritten target.truncate(self.len()); @@ -898,328 +808,83 @@ impl ToOwned for [T] { } } -//////////////////////////////////////////////////////////////////////////////// -// Sorting -//////////////////////////////////////////////////////////////////////////////// - -/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. -/// -/// This is the integral subroutine of insertion sort. #[cfg(not(no_global_oom_handling))] -fn insert_head(v: &mut [T], is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - if v.len() >= 2 && is_less(&v[1], &v[0]) { - unsafe { - // There are three ways to implement insertion here: - // - // 1. Swap adjacent elements until the first one gets to its final destination. - // However, this way we copy data around more than is necessary. If elements are big - // structures (costly to copy), this method will be slow. - // - // 2. Iterate until the right place for the first element is found. Then shift the - // elements succeeding it to make room for it and finally place it into the - // remaining hole. This is a good method. - // - // 3. Copy the first element into a temporary variable. Iterate until the right place - // for it is found. As we go along, copy every traversed element into the slot - // preceding it. Finally, copy data from the temporary variable into the remaining - // hole. This method is very good. Benchmarks demonstrated slightly better - // performance than with the 2nd method. - // - // All methods were benchmarked, and the 3rd showed best results. So we chose that one. - let tmp = mem::ManuallyDrop::new(ptr::read(&v[0])); - - // Intermediate state of the insertion process is always tracked by `hole`, which - // serves two purposes: - // 1. Protects integrity of `v` from panics in `is_less`. - // 2. Fills the remaining hole in `v` in the end. - // - // Panic safety: - // - // If `is_less` panics at any point during the process, `hole` will get dropped and - // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it - // initially held exactly once. - let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] }; - ptr::copy_nonoverlapping(&v[1], &mut v[0], 1); - - for i in 2..v.len() { - if !is_less(&v[i], &*tmp) { - break; - } - ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1); - hole.dest = &mut v[i]; - } - // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. - } - } - - // When dropped, copies from `src` into `dest`. - struct InsertionHole { - src: *const T, - dest: *mut T, - } - - impl Drop for InsertionHole { - fn drop(&mut self) { - unsafe { - ptr::copy_nonoverlapping(self.src, self.dest, 1); - } - } +impl SpecCloneIntoVec for [T] { + fn clone_into(&self, target: &mut Vec) { + target.clear(); + target.extend_from_slice(self); } } -/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and -/// stores the result into `v[..]`. 
-/// -/// # Safety -/// -/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough -/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type. #[cfg(not(no_global_oom_handling))] -unsafe fn merge(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - let len = v.len(); - let v = v.as_mut_ptr(); - let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) }; - - // The merge process first copies the shorter run into `buf`. Then it traces the newly copied - // run and the longer run forwards (or backwards), comparing their next unconsumed elements and - // copying the lesser (or greater) one into `v`. - // - // As soon as the shorter run is fully consumed, the process is done. If the longer run gets - // consumed first, then we must copy whatever is left of the shorter run into the remaining - // hole in `v`. - // - // Intermediate state of the process is always tracked by `hole`, which serves two purposes: - // 1. Protects integrity of `v` from panics in `is_less`. - // 2. Fills the remaining hole in `v` if the longer run gets consumed first. - // - // Panic safety: - // - // If `is_less` panics at any point during the process, `hole` will get dropped and fill the - // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every - // object it initially held exactly once. - let mut hole; - - if mid <= len - mid { - // The left run is shorter. - unsafe { - ptr::copy_nonoverlapping(v, buf, mid); - hole = MergeHole { start: buf, end: buf.add(mid), dest: v }; - } - - // Initially, these pointers point to the beginnings of their arrays. - let left = &mut hole.start; - let mut right = v_mid; - let out = &mut hole.dest; - - while *left < hole.end && right < v_end { - // Consume the lesser side. - // If equal, prefer the left run to maintain stability. - unsafe { - let to_copy = if is_less(&*right, &**left) { - get_and_increment(&mut right) - } else { - get_and_increment(left) - }; - ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1); - } - } - } else { - // The right run is shorter. - unsafe { - ptr::copy_nonoverlapping(v_mid, buf, len - mid); - hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid }; - } - - // Initially, these pointers point past the ends of their arrays. - let left = &mut hole.dest; - let right = &mut hole.end; - let mut out = v_end; - - while v < *left && buf < *right { - // Consume the greater side. - // If equal, prefer the right run to maintain stability. - unsafe { - let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) { - decrement_and_get(left) - } else { - decrement_and_get(right) - }; - ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1); - } - } - } - // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of - // it will now be copied into the hole in `v`. - - unsafe fn get_and_increment(ptr: &mut *mut T) -> *mut T { - let old = *ptr; - *ptr = unsafe { ptr.add(1) }; - old - } - - unsafe fn decrement_and_get(ptr: &mut *mut T) -> *mut T { - *ptr = unsafe { ptr.sub(1) }; - *ptr +#[stable(feature = "rust1", since = "1.0.0")] +impl ToOwned for [T] { + type Owned = Vec; + #[cfg(not(test))] + fn to_owned(&self) -> Vec { + self.to_vec() } - // When dropped, copies the range `start..end` into `dest..`. 
- struct MergeHole { - start: *mut T, - end: *mut T, - dest: *mut T, + #[cfg(test)] + fn to_owned(&self) -> Vec { + hack::to_vec(self, Global) } - impl Drop for MergeHole { - fn drop(&mut self) { - // `T` is not a zero-sized type, and these are pointers into a slice's elements. - unsafe { - let len = self.end.sub_ptr(self.start); - ptr::copy_nonoverlapping(self.start, self.dest, len); - } - } + fn clone_into(&self, target: &mut Vec) { + SpecCloneIntoVec::clone_into(self, target); } } -/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail -/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt). -/// -/// The algorithm identifies strictly descending and non-descending subsequences, which are called -/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed -/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are -/// satisfied: -/// -/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len` -/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len` -/// -/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case. +//////////////////////////////////////////////////////////////////////////////// +// Sorting +//////////////////////////////////////////////////////////////////////////////// + +#[inline] #[cfg(not(no_global_oom_handling))] -fn merge_sort(v: &mut [T], mut is_less: F) +fn stable_sort(v: &mut [T], mut is_less: F) where F: FnMut(&T, &T) -> bool, { - // Slices of up to this length get sorted using insertion sort. - const MAX_INSERTION: usize = 20; - // Very short runs are extended using insertion sort to span at least this many elements. - const MIN_RUN: usize = 10; - - // Sorting has no meaningful behavior on zero-sized types. if T::IS_ZST { + // Sorting has no meaningful behavior on zero-sized types. Do nothing. return; } - let len = v.len(); - - // Short arrays get sorted in-place via insertion sort to avoid allocations. - if len <= MAX_INSERTION { - if len >= 2 { - for i in (0..len - 1).rev() { - insert_head(&mut v[i..], &mut is_less); - } - } - return; - } - - // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it - // shallow copies of the contents of `v` without risking the dtors running on copies if - // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run, - // which will always have length at most `len / 2`. - let mut buf = Vec::with_capacity(len / 2); - - // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a - // strange decision, but consider the fact that merges more often go in the opposite direction - // (forwards). According to benchmarks, merging forwards is slightly faster than merging - // backwards. To conclude, identifying runs by traversing backwards improves performance. - let mut runs = vec![]; - let mut end = len; - while end > 0 { - // Find the next natural run, and reverse it if it's strictly descending. 
- let mut start = end - 1; - if start > 0 { - start -= 1; - unsafe { - if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) { - while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) { - start -= 1; - } - v[start..end].reverse(); - } else { - while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) - { - start -= 1; - } - } - } - } + let elem_alloc_fn = |len: usize| -> *mut T { + // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len > + // v.len(). Alloc in general will only be used as 'shadow-region' to store temporary swap + // elements. + unsafe { alloc::alloc(alloc::Layout::array::(len).unwrap_unchecked()) as *mut T } + }; - // Insert some more elements into the run if it's too short. Insertion sort is faster than - // merge sort on short sequences, so this significantly improves performance. - while start > 0 && end - start < MIN_RUN { - start -= 1; - insert_head(&mut v[start..end], &mut is_less); + let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| { + // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len > + // v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same + // len. + unsafe { + alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::(len).unwrap_unchecked()); } + }; - // Push this run onto the stack. - runs.push(Run { start, len: end - start }); - end = start; - - // Merge some pairs of adjacent runs to satisfy the invariants. - while let Some(r) = collapse(&runs) { - let left = runs[r + 1]; - let right = runs[r]; - unsafe { - merge( - &mut v[left.start..right.start + right.len], - left.len, - buf.as_mut_ptr(), - &mut is_less, - ); - } - runs[r] = Run { start: left.start, len: left.len + right.len }; - runs.remove(r + 1); + let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun { + // SAFETY: Creating the layout is safe as long as merge_sort never calls this with an + // obscene length or 0. + unsafe { + alloc::alloc(alloc::Layout::array::(len).unwrap_unchecked()) + as *mut sort::TimSortRun } - } + }; - // Finally, exactly one run must remain in the stack. - debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len); - - // Examines the stack of runs and identifies the next pair of runs to merge. More specifically, - // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the - // algorithm should continue building a new run instead, `None` is returned. - // - // TimSort is infamous for its buggy implementations, as described here: - // http://envisage-project.eu/timsort-specification-and-verification/ - // - // The gist of the story is: we must enforce the invariants on the top four runs on the stack. - // Enforcing them on just top three is not sufficient to ensure that the invariants will still - // hold for *all* runs in the stack. - // - // This function correctly checks invariants for the top four runs. Additionally, if the top - // run starts at index 0, it will always demand a merge operation until the stack is fully - // collapsed, in order to complete the sort. 
- #[inline] - fn collapse(runs: &[Run]) -> Option { - let n = runs.len(); - if n >= 2 - && (runs[n - 1].start == 0 - || runs[n - 2].len <= runs[n - 1].len - || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) - || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) - { - if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) } - } else { - None + let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| { + // SAFETY: The caller must ensure that buf_ptr was created by elem_alloc_fn with the same + // len. + unsafe { + alloc::dealloc( + buf_ptr as *mut u8, + alloc::Layout::array::(len).unwrap_unchecked(), + ); } - } + }; - #[derive(Clone, Copy)] - struct Run { - start: usize, - len: usize, - } + sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn); } diff --git a/rust/alloc/str.rs b/rust/alloc/str.rs index c797ba7d8f0c98..d33ece5fc64c07 100644 --- a/rust/alloc/str.rs +++ b/rust/alloc/str.rs @@ -18,7 +18,6 @@ use core::unicode::conversions; use crate::borrow::ToOwned; use crate::boxed::Box; -use crate::collections::TryReserveError; use crate::slice::{Concat, Join, SliceIndex}; use crate::string::String; use crate::vec::Vec; @@ -259,7 +258,7 @@ impl str { /// assert_eq!("than an old", s.replace("is", "an")); /// ``` /// - /// When the pattern doesn't match: + /// When the pattern doesn't match, it returns this string slice as [`String`]: /// /// ``` /// let s = "this is old"; @@ -300,7 +299,7 @@ impl str { /// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1)); /// ``` /// - /// When the pattern doesn't match: + /// When the pattern doesn't match, it returns this string slice as [`String`]: /// /// ``` /// let s = "this is old"; @@ -562,10 +561,9 @@ impl str { #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] pub fn to_ascii_uppercase(&self) -> String { - let mut bytes = self.as_bytes().to_vec(); - bytes.make_ascii_uppercase(); - // make_ascii_uppercase() preserves the UTF-8 invariant. - unsafe { String::from_utf8_unchecked(bytes) } + let mut s = self.to_owned(); + s.make_ascii_uppercase(); + s } /// Returns a copy of this string where each character is mapped to its @@ -595,27 +593,9 @@ impl str { #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] pub fn to_ascii_lowercase(&self) -> String { - let mut bytes = self.as_bytes().to_vec(); - bytes.make_ascii_lowercase(); - // make_ascii_lowercase() preserves the UTF-8 invariant. - unsafe { String::from_utf8_unchecked(bytes) } - } - - /// Tries to create a `String`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s: &str = "a"; - /// let ss: String = s.try_to_owned().unwrap(); - /// ``` - #[rustc_allow_incoherent_impl] - #[inline] - #[stable(feature = "kernel", since = "1.0.0")] - pub fn try_to_owned(&self) -> Result { - unsafe { Ok(String::from_utf8_unchecked(self.as_bytes().try_to_vec()?)) } + let mut s = self.to_owned(); + s.make_ascii_lowercase(); + s } } diff --git a/rust/alloc/string.rs b/rust/alloc/string.rs index bc84baa90d5e02..5290a0cd62b3a0 100644 --- a/rust/alloc/string.rs +++ b/rust/alloc/string.rs @@ -44,14 +44,12 @@ #![stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(no_global_oom_handling))] -use core::char::{decode_utf16, REPLACEMENT_CHARACTER}; use core::error::Error; use core::fmt; use core::hash; -use core::iter::FusedIterator; #[cfg(not(no_global_oom_handling))] -use core::iter::{from_fn, FromIterator}; +use core::iter::from_fn; +use core::iter::FusedIterator; #[cfg(not(no_global_oom_handling))] use core::ops::Add; #[cfg(not(no_global_oom_handling))] @@ -363,9 +361,9 @@ use crate::vec::Vec; /// [Deref]: core::ops::Deref "ops::Deref" /// [`Deref`]: core::ops::Deref "ops::Deref" /// [`as_str()`]: String::as_str -#[derive(PartialOrd, Eq, Ord)] -#[cfg_attr(not(test), rustc_diagnostic_item = "String")] +#[derive(PartialEq, PartialOrd, Eq, Ord)] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(not(test), lang = "String")] pub struct String { vec: Vec, } @@ -685,7 +683,7 @@ impl String { // This isn't done via collect::>() for performance reasons. // FIXME: the function can be simplified again when #48994 is closed. let mut ret = String::with_capacity(v.len()); - for c in decode_utf16(v.iter().cloned()) { + for c in char::decode_utf16(v.iter().cloned()) { if let Ok(c) = c { ret.push(c); } else { @@ -724,7 +722,9 @@ impl String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16_lossy(v: &[u16]) -> String { - decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect() + char::decode_utf16(v.iter().cloned()) + .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) + .collect() } /// Decomposes a `String` into its raw components. @@ -930,12 +930,12 @@ impl String { /// Copies elements from `src` range to the end of the string. /// - /// ## Panics + /// # Panics /// /// Panics if the starting point or end point do not lie on a [`char`] /// boundary, or if they're out of bounds. /// - /// ## Examples + /// # Examples /// /// ``` /// #![feature(string_extend_from_within)] @@ -951,7 +951,7 @@ impl String { /// assert_eq!(string, "abcdecdeabecde"); /// ``` #[cfg(not(no_global_oom_handling))] - #[unstable(feature = "string_extend_from_within", issue = "none")] + #[unstable(feature = "string_extend_from_within", issue = "103806")] pub fn extend_from_within(&mut self, src: R) where R: RangeBounds, @@ -2209,18 +2209,6 @@ impl<'a, 'b> Pattern<'a> for &'b String { } } -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for String { - #[inline] - fn eq(&self, other: &String) -> bool { - PartialEq::eq(&self[..], &other[..]) - } - #[inline] - fn ne(&self, other: &String) -> bool { - PartialEq::ne(&self[..], &other[..]) - } -} - macro_rules! 
impl_eq { ($lhs:ty, $rhs: ty) => { #[stable(feature = "rust1", since = "1.0.0")] @@ -2550,6 +2538,15 @@ impl ToString for char { } } +#[cfg(not(no_global_oom_handling))] +#[stable(feature = "bool_to_string_specialization", since = "1.68.0")] +impl ToString for bool { + #[inline] + fn to_string(&self) -> String { + String::from(if *self { "true" } else { "false" }) + } +} + #[cfg(not(no_global_oom_handling))] #[stable(feature = "u8_to_string_specialization", since = "1.54.0")] impl ToString for u8 { @@ -2680,7 +2677,7 @@ impl From<&String> for String { } } -// note: test pulls in libstd, which causes errors here +// note: test pulls in std, which causes errors here #[cfg(not(test))] #[stable(feature = "string_from_box", since = "1.18.0")] impl From> for String { diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs index 3594ad890c3dca..d503d2f478ce7a 100644 --- a/rust/alloc/vec/drain.rs +++ b/rust/alloc/vec/drain.rs @@ -225,9 +225,9 @@ impl Drop for Drain<'_, T, A> { } // as_slice() must only be called when iter.len() is > 0 because - // vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate - // the iterator's internal pointers. Creating a reference to deallocated memory - // is invalid even when it is zero-length + // it also gets touched by vec::Splice which may turn it into a dangling pointer + // which would make it and the vec pointer point to different allocations which would + // lead to invalid pointer arithmetic below. let drop_ptr = iter.as_slice().as_ptr(); unsafe { diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs index 0528ee16505a46..aac0ec16aef15d 100644 --- a/rust/alloc/vec/into_iter.rs +++ b/rust/alloc/vec/into_iter.rs @@ -3,6 +3,8 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; use crate::alloc::{Allocator, Global}; +#[cfg(not(no_global_oom_handling))] +use crate::collections::VecDeque; use crate::raw_vec::RawVec; use core::array; use core::fmt; @@ -11,6 +13,7 @@ use core::iter::{ }; use core::marker::PhantomData; use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; +use core::num::NonZeroUsize; #[cfg(not(no_global_oom_handling))] use core::ops::Deref; use core::ptr::{self, NonNull}; @@ -40,7 +43,9 @@ pub struct IntoIter< // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop pub(super) alloc: ManuallyDrop, pub(super) ptr: *const T, - pub(super) end: *const T, + pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that // ptr == end is a quick test for the Iterator being empty, that works // for both ZST and non-ZST. } #[stable(feature = "vec_intoiter_debug", since = "1.13.0")] @@ -105,7 +110,7 @@ impl IntoIter { /// ``` /// # let mut into_iter = Vec::::with_capacity(10).into_iter(); /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter()); - /// (&mut into_iter).for_each(core::mem::drop); + /// (&mut into_iter).for_each(drop); /// std::mem::forget(into_iter); /// ``` /// @@ -132,7 +137,36 @@ impl IntoIter { /// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed. pub(crate) fn forget_remaining_elements(&mut self) { - self.ptr = self.end; + // For the ZST case, it is crucial that we mutate `end` here, not `ptr`. + // `ptr` must stay aligned, while `end` may be unaligned. 
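// (Concretely: for a ZST the `end` field encodes `ptr + len` in bytes, so an
// iterator holding three values has `end == ptr.wrapping_byte_add(3)`;
// resetting `end` to `ptr` makes the `ptr == end` emptiness check succeed
// without ever misaligning `ptr`.)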
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
index 0528ee16505a46..aac0ec16aef15d 100644
--- a/rust/alloc/vec/into_iter.rs
+++ b/rust/alloc/vec/into_iter.rs
@@ -3,6 +3,8 @@
 #[cfg(not(no_global_oom_handling))]
 use super::AsVecIntoIter;
 use crate::alloc::{Allocator, Global};
+#[cfg(not(no_global_oom_handling))]
+use crate::collections::VecDeque;
 use crate::raw_vec::RawVec;
 use core::array;
 use core::fmt;
@@ -11,6 +13,7 @@ use core::iter::{
 };
 use core::marker::PhantomData;
 use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::num::NonZeroUsize;
 #[cfg(not(no_global_oom_handling))]
 use core::ops::Deref;
 use core::ptr::{self, NonNull};
@@ -40,7 +43,9 @@ pub struct IntoIter<
     // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
     pub(super) alloc: ManuallyDrop<A>,
     pub(super) ptr: *const T,
-    pub(super) end: *const T,
+    pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+                              // ptr == end is a quick test for the Iterator being empty, that works
+                              // for both ZST and non-ZST.
 }
 
 #[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
@@ -105,7 +110,7 @@ impl<T, A: Allocator> IntoIter<T, A> {
     /// ```
     /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
     /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
-    /// (&mut into_iter).for_each(core::mem::drop);
+    /// (&mut into_iter).for_each(drop);
     /// std::mem::forget(into_iter);
     /// ```
     ///
@@ -132,7 +137,36 @@ impl<T, A: Allocator> IntoIter<T, A> {
     /// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
     pub(crate) fn forget_remaining_elements(&mut self) {
-        self.ptr = self.end;
+        // For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
+        // `ptr` must stay aligned, while `end` may be unaligned.
+        self.end = self.ptr;
+    }
+
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> {
+        // Keep our `Drop` impl from dropping the elements and the allocator
+        let mut this = ManuallyDrop::new(self);
+
+        // SAFETY: This allocation originally came from a `Vec`, so it passes
+        // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
+        // so the `sub_ptr`s below cannot wrap, and will produce a well-formed
+        // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
+        // Taking `alloc` is ok because nothing else is going to look at it,
+        // since our `Drop` impl isn't going to run so there's no more code.
+        unsafe {
+            let buf = this.buf.as_ptr();
+            let initialized = if T::IS_ZST {
+                // All the pointers are the same for ZSTs, so it's fine to
+                // say that they're all at the beginning of the "allocation".
+                0..this.len()
+            } else {
+                this.ptr.sub_ptr(buf)..this.end.sub_ptr(buf)
+            };
+            let cap = this.cap;
+            let alloc = ManuallyDrop::take(&mut this.alloc);
+            VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc)
+        }
     }
 }
 
@@ -157,10 +191,9 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
         if self.ptr == self.end {
             None
         } else if T::IS_ZST {
-            // purposefully don't use 'ptr.offset' because for
-            // vectors with 0-size elements this would return the
-            // same pointer.
-            self.ptr = self.ptr.wrapping_byte_add(1);
+            // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
+            // reducing the `end`.
+            self.end = self.end.wrapping_byte_sub(1);
 
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
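For context (not part of the patch): the new encoding leaves `ptr` untouched for zero-sized element types and tracks the remaining length in `end`, so user-visible behavior is unchanged. A small sketch of what stays observable:

```rust
fn main() {
    let v = vec![(), (), ()];
    let mut it = v.into_iter();
    assert_eq!(it.len(), 3);
    assert_eq!(it.next(), Some(()));
    assert_eq!(it.len(), 2);            // length shrank...
    assert_eq!(it.as_slice().len(), 2); // ...and `as_slice` still starts at `ptr`
}
```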
@@ -183,14 +216,12 @@ }
 
     #[inline]
-    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+    fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
         let step_size = self.len().min(n);
         let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
         if T::IS_ZST {
-            // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
-            // effectively results in unsigned pointers representing positions 0..usize::MAX,
-            // which is valid for ZSTs.
-            self.ptr = self.ptr.wrapping_byte_add(step_size);
+            // See `next` for why we sub `end` here.
+            self.end = self.end.wrapping_byte_sub(step_size);
         } else {
             // SAFETY: the min() above ensures that step_size is in bounds
             self.ptr = unsafe { self.ptr.add(step_size) };
         }
@@ -199,10 +230,7 @@
         unsafe {
             ptr::drop_in_place(to_drop);
         }
-        if step_size < n {
-            return Err(step_size);
-        }
-        Ok(())
+        NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
     }
 
     #[inline]
@@ -223,7 +251,7 @@
             return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
         }
 
-        self.ptr = self.ptr.wrapping_byte_add(N);
+        self.end = self.end.wrapping_byte_sub(N);
         // Safety: ditto
         return Ok(unsafe { raw_ary.transpose().assume_init() });
     }
@@ -285,7 +313,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
     }
 
     #[inline]
-    fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+    fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
         let step_size = self.len().min(n);
         if T::IS_ZST {
             // SAFETY: same as for advance_by()
@@ -299,10 +327,7 @@
         unsafe {
             ptr::drop_in_place(to_drop);
         }
-        if step_size < n {
-            return Err(step_size);
-        }
-        Ok(())
+        NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
     }
 }
 
@@ -319,6 +344,24 @@ impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
 #[unstable(feature = "trusted_len", issue = "37572")]
 unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
 
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+    A: Allocator + Default,
+{
+    /// Creates an empty `vec::IntoIter`.
+    ///
+    /// ```
+    /// # use std::vec;
+    /// let iter: vec::IntoIter<u8> = Default::default();
+    /// assert_eq!(iter.len(), 0);
+    /// assert_eq!(iter.as_slice(), &[]);
+    /// ```
+    fn default() -> Self {
+        super::Vec::new_in(Default::default()).into_iter()
+    }
+}
+
 #[doc(hidden)]
 #[unstable(issue = "none", feature = "std_internals")]
 #[rustc_unsafe_specialization_marker]
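For context (not part of the patch): the `Default` impl above is what the `default_iters` stabilization in Rust 1.70 brings; it makes `vec::IntoIter` usable where a default value is required, e.g.:

```rust
use std::vec;

fn main() {
    // Build empty iterators without ever materializing a `Vec` element.
    let mut slots: Vec<vec::IntoIter<u32>> = Vec::new();
    slots.resize_with(3, Default::default);
    assert!(slots.iter().all(|it| it.len() == 0));
}
```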
diff --git a/rust/alloc/vec/is_zero.rs b/rust/alloc/vec/is_zero.rs
index 426bb2c9f6ff5f..d928dcf90e804c 100644
--- a/rust/alloc/vec/is_zero.rs
+++ b/rust/alloc/vec/is_zero.rs
@@ -1,11 +1,13 @@
 // SPDX-License-Identifier: Apache-2.0 OR MIT
+
 use core::num::{Saturating, Wrapping};
 
 use crate::boxed::Box;
 
 #[rustc_specialization_trait]
 pub(super) unsafe trait IsZero {
-    /// Whether this value's representation is all zeros
+    /// Whether this value's representation is all zeros,
+    /// or can be represented with all zeroes.
     fn is_zero(&self) -> bool;
 }
 
@@ -58,7 +60,7 @@ unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
     #[inline]
     fn is_zero(&self) -> bool {
         // Because this is generated as a runtime check, it's not obvious that
-        // it's worth doing if the array is really long. The threshold here
+        // it's worth doing if the array is really long. The threshold here
         // is largely arbitrary, but was picked because as of 2022-07-01 LLVM
         // fails to const-fold the check in `vec![[1; 32]; n]`
         // See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
@@ -148,6 +150,23 @@ impl_is_zero_option_of_nonzero!(
     NonZeroIsize,
 );
 
+macro_rules! impl_is_zero_option_of_num {
+    ($($t:ty,)+) => {$(
+        unsafe impl IsZero for Option<$t> {
+            #[inline]
+            fn is_zero(&self) -> bool {
+                const {
+                    let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+                    assert!(none.is_none());
+                }
+                self.is_none()
+            }
+        }
+    )+};
+}
+
+impl_is_zero_option_of_num!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize,);
+
 unsafe impl<T: IsZero> IsZero for Wrapping<T> {
     #[inline]
     fn is_zero(&self) -> bool {
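For context (not part of the patch): the `const` block in the macro above is a compile-time proof that the all-zero bit pattern decodes to `None` for these `Option` types, which is what lets `vec![None::<i32>; n]` take the zeroed-allocation fast path. The check amounts to the following (illustration only; it relies on the very layout property the macro verifies):

```rust
use std::mem::MaybeUninit;

fn main() {
    // Same assertion the macro performs at compile time, here at run time.
    let none: Option<i32> = unsafe { MaybeUninit::zeroed().assume_init() };
    assert!(none.is_none());
}
```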
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index 37bed9d5a96691..38f5ee24e73002 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -58,13 +58,9 @@
 #[cfg(not(no_global_oom_handling))]
 use core::cmp;
 use core::cmp::Ordering;
-use core::convert::TryFrom;
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use core::intrinsics::assume;
 use core::iter;
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
 use core::marker::PhantomData;
 use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
 use core::ops::{self, Index, IndexMut, Range, RangeBounds};
@@ -74,7 +70,7 @@ use core::slice::{self, SliceIndex};
 use crate::alloc::{Allocator, Global};
 use crate::borrow::{Cow, ToOwned};
 use crate::boxed::Box;
-use crate::collections::TryReserveError;
+use crate::collections::{TryReserveError, TryReserveErrorKind};
 use crate::raw_vec::RawVec;
 
 #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
@@ -167,7 +163,7 @@ mod spec_extend;
 /// vec[0] = 7;
 /// assert_eq!(vec[0], 7);
 ///
-/// vec.extend([1, 2, 3].iter().copied());
+/// vec.extend([1, 2, 3]);
 ///
 /// for x in &vec {
 ///     println!("{x}");
@@ -379,8 +375,8 @@ mod spec_extend;
 /// Currently, `Vec` does not guarantee the order in which elements are dropped.
 /// The order has changed in the past and may change again.
 ///
-/// [`get`]: ../../std/vec/struct.Vec.html#method.get
-/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`get`]: slice::get
+/// [`get_mut`]: slice::get_mut
 /// [`String`]: crate::string::String
 /// [`&str`]: type@str
 /// [`shrink_to_fit`]: Vec::shrink_to_fit
@@ -525,6 +521,9 @@ impl<T> Vec<T> {
     /// assert_eq!(vec.len(), 11);
     /// assert!(vec.capacity() >= 11);
     ///
+    /// let mut result = Vec::try_with_capacity(usize::MAX);
+    /// assert!(result.is_err());
+    ///
     /// // A vector of a zero-sized type will always over-allocate, since no
     /// // allocation is necessary
     /// let vec_units = Vec::<()>::try_with_capacity(10).unwrap();
@@ -543,6 +542,8 @@ impl<T> Vec<T> {
     /// This is highly unsafe, due to the number of invariants that aren't
     /// checked:
     ///
+    /// * `ptr` must have been allocated using the global allocator, such as via
+    ///   the [`alloc::alloc`] function.
     /// * `T` needs to have the same alignment as what `ptr` was allocated with.
     ///   (`T` having a less strict alignment is not sufficient, the alignment really
     ///   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
     ///   allocated and deallocated with the same layout.)
@@ -579,6 +580,7 @@ impl<T> Vec<T> {
     /// function.
     ///
     /// [`String`]: crate::string::String
+    /// [`alloc::alloc`]: crate::alloc::alloc
     /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
     ///
     /// # Examples
@@ -772,6 +774,9 @@ impl<T, A: Allocator> Vec<T, A> {
     /// assert_eq!(vec.len(), 11);
     /// assert!(vec.capacity() >= 11);
     ///
+    /// let mut result = Vec::try_with_capacity_in(usize::MAX, System);
+    /// assert!(result.is_err());
+    ///
     /// // A vector of a zero-sized type will always over-allocate, since no
     /// // allocation is necessary
     /// let vec_units = Vec::<(), System>::try_with_capacity_in(10, System).unwrap();
@@ -791,6 +796,7 @@ impl<T, A: Allocator> Vec<T, A> {
     /// This is highly unsafe, due to the number of invariants that aren't
     /// checked:
     ///
+    /// * `ptr` must be [*currently allocated*] via the given allocator `alloc`.
     /// * `T` needs to have the same alignment as what `ptr` was allocated with.
     ///   (`T` having a less strict alignment is not sufficient, the alignment really
     ///   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -824,6 +830,7 @@ impl<T, A: Allocator> Vec<T, A> {
     ///
     /// [`String`]: crate::string::String
     /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+    /// [*currently allocated*]: crate::alloc::Allocator#currently-allocated-memory
     /// [*fit*]: crate::alloc::Allocator#memory-fitting
     ///
     /// # Examples
@@ -1206,7 +1213,8 @@ impl<T, A: Allocator> Vec<T, A> {
     /// Converts the vector into [`Box<[T]>`][owned slice].
     ///
-    /// Note that this will drop any excess capacity.
+    /// If the vector has excess capacity, its items will be moved into a
+    /// newly-allocated buffer with exactly the right capacity.
     ///
     /// [owned slice]: Box
     ///
@@ -1242,7 +1250,8 @@ impl<T, A: Allocator> Vec<T, A> {
     /// Tries to convert the vector into [`Box<[T]>`][owned slice].
     ///
-    /// Note that this will drop any excess capacity.
+    /// If the vector has excess capacity, its items will be moved into a
+    /// newly-allocated buffer with exactly the right capacity.
     ///
     /// [owned slice]: Box
    ///
@@ -1405,11 +1414,7 @@ impl<T, A: Allocator> Vec<T, A> {
     pub fn as_ptr(&self) -> *const T {
         // We shadow the slice method of the same name to avoid going through
         // `deref`, which creates an intermediate reference.
-        let ptr = self.buf.ptr();
-        unsafe {
-            assume(!ptr.is_null());
-        }
-        ptr
+        self.buf.ptr()
     }
 
     /// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
@@ -1442,11 +1447,7 @@ impl<T, A: Allocator> Vec<T, A> {
     pub fn as_mut_ptr(&mut self) -> *mut T {
         // We shadow the slice method of the same name to avoid going through
         // `deref_mut`, which creates an intermediate reference.
-        let ptr = self.buf.ptr();
-        unsafe {
-            assume(!ptr.is_null());
-        }
-        ptr
+        self.buf.ptr()
     }
 
     /// Returns a reference to the underlying allocator.
@@ -2368,7 +2369,7 @@ impl<T, A: Allocator> Vec<T, A> {
     {
         let len = self.len();
         if new_len > len {
-            self.extend_with(new_len - len, ExtendFunc(f));
+            self.extend_trusted(iter::repeat_with(f).take(new_len - len));
         } else {
             self.truncate(new_len);
         }
@@ -2691,7 +2692,7 @@ impl<T, A: Allocator> Vec<T, A> {
         self.reserve(range.len());
 
         // SAFETY:
-        // - `slice::range` guarantees that the given range is valid for indexing self
+        // - `slice::range` guarantees that the given range is valid for indexing self
         unsafe {
             self.spec_extend_from_within(range);
         }
@@ -2759,16 +2760,6 @@ impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
     }
 }
 
-struct ExtendFunc<F>(F);
-impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
-    fn next(&mut self) -> T {
-        (self.0)()
-    }
-    fn last(mut self) -> T {
-        (self.0)()
-    }
-}
-
 impl<T, A: Allocator> Vec<T, A> {
     #[cfg(not(no_global_oom_handling))]
     /// Extend the vector by `n` values, using the given generator.
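For context (not part of the patch): `resize_with` now forwards to `extend_trusted` via `iter::repeat_with(f).take(n)` instead of the removed `ExtendFunc`; its documented behavior is unchanged:

```rust
fn main() {
    let mut v = vec![1, 2, 3];
    let mut next = 3;
    v.resize_with(5, || {
        next += 1;
        next
    });
    assert_eq!(v, [1, 2, 3, 4, 5]);
    v.resize_with(2, Default::default); // shrinking just truncates
    assert_eq!(v, [1, 2]);
}
```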
@@ -2886,7 +2877,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
         let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
 
         // SAFETY:
-        // - caller guaratees that src is a valid index
+        // - caller guarantees that src is a valid index
         let to_clone = unsafe { this.get_unchecked(src) };
 
         iter::zip(to_clone, spare)
@@ -2905,7 +2896,7 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
         let (init, spare) = self.split_at_spare_mut();
 
         // SAFETY:
-        // - caller guaratees that `src` is a valid index
+        // - caller guarantees that `src` is a valid index
         let source = unsafe { init.get_unchecked(src) };
 
         // SAFETY:
@@ -2948,35 +2939,6 @@ impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
     }
 }
 
-#[cfg(not(no_global_oom_handling))]
-trait SpecCloneFrom {
-    fn clone_from(this: &mut Self, other: &Self);
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> {
-    default fn clone_from(this: &mut Self, other: &Self) {
-        // drop anything that will not be overwritten
-        this.truncate(other.len());
-
-        // self.len <= other.len due to the truncate above, so the
-        // slices here are always in-bounds.
-        let (init, tail) = other.split_at(this.len());
-
-        // reuse the contained values' allocations/resources.
-        this.clone_from_slice(init);
-        this.extend_from_slice(tail);
-    }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> {
-    fn clone_from(this: &mut Self, other: &Self) {
-        this.clear();
-        this.extend_from_slice(other);
-    }
-}
-
 #[cfg(not(no_global_oom_handling))]
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
@@ -2988,7 +2950,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
     // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
     // required for this method definition, is not available. Instead use the
-    // `slice::to_vec` function which is only available with cfg(test)
+    // `slice::to_vec` function which is only available with cfg(test)
     // NB see the slice::hack module in slice.rs for more information
     #[cfg(test)]
     fn clone(&self) -> Self {
@@ -2997,7 +2959,7 @@
     }
 
     fn clone_from(&mut self, other: &Self) {
-        SpecCloneFrom::clone_from(self, other)
+        crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self);
     }
 }
 
@@ -3078,7 +3040,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
     /// assert_eq!(v_iter.next(), None);
     /// ```
     #[inline]
-    fn into_iter(self) -> IntoIter<T, A> {
+    fn into_iter(self) -> Self::IntoIter {
         unsafe {
             let mut me = ManuallyDrop::new(self);
             let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
@@ -3106,7 +3068,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
     type Item = &'a T;
     type IntoIter = slice::Iter<'a, T>;
 
-    fn into_iter(self) -> slice::Iter<'a, T> {
+    fn into_iter(self) -> Self::IntoIter {
         self.iter()
     }
 }
@@ -3116,7 +3078,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
     type Item = &'a mut T;
     type IntoIter = slice::IterMut<'a, T>;
 
-    fn into_iter(self) -> slice::IterMut<'a, T> {
+    fn into_iter(self) -> Self::IntoIter {
         self.iter_mut()
     }
 }
@@ -3196,6 +3158,69 @@ impl<T, A: Allocator> Vec<T, A> {
         Ok(())
     }
 
+    // specific extend for `TrustedLen` iterators, called both by the specializations
+    // and internal places where resolving specialization makes compilation slower
+    #[cfg(not(no_global_oom_handling))]
+    fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
+        let (low, high) = iterator.size_hint();
+        if let Some(additional) = high {
+            debug_assert_eq!(
+                low,
+                additional,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
+            self.reserve(additional);
+            unsafe {
+                let ptr = self.as_mut_ptr();
+                let mut local_len = SetLenOnDrop::new(&mut self.len);
+                iterator.for_each(move |element| {
+                    ptr::write(ptr.add(local_len.current_len()), element);
+                    // Since the loop executes user code which can panic we have to update
+                    // the length every step to correctly drop what we've written.
+                    // NB can't overflow since we would have had to alloc the address space
+                    local_len.increment_len(1);
+                });
+            }
+        } else {
+            // Per TrustedLen contract a `None` upper bound means that the iterator length
+            // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+            // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+            // This avoids additional codegen for a fallback code path which would eventually
+            // panic anyway.
+            panic!("capacity overflow");
+        }
+    }
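For context (not part of the patch): the per-element `increment_len(1)` is what makes `extend_trusted` panic-safe; `SetLenOnDrop` commits only the initialized prefix if user code unwinds. A sketch of that behavior (the `len() == 5` outcome reflects this particular implementation):

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};

fn main() {
    let mut v: Vec<String> = Vec::new();
    let result = catch_unwind(AssertUnwindSafe(|| {
        // `Map<Range>` is `TrustedLen`, so this goes through `extend_trusted`.
        v.extend((0..10).map(|i| {
            if i == 5 {
                panic!("boom");
            }
            i.to_string()
        }));
    }));
    assert!(result.is_err());
    assert_eq!(v.len(), 5); // the five completed writes were kept, none leaked
}
```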
+
+    // specific extend for `TrustedLen` iterators, called both by the specializations
+    // and internal places where resolving specialization makes compilation slower
+    fn try_extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) -> Result<(), TryReserveError> {
+        let (low, high) = iterator.size_hint();
+        if let Some(additional) = high {
+            debug_assert_eq!(
+                low,
+                additional,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
+            self.try_reserve(additional)?;
+            unsafe {
+                let ptr = self.as_mut_ptr();
+                let mut local_len = SetLenOnDrop::new(&mut self.len);
+                iterator.for_each(move |element| {
+                    ptr::write(ptr.add(local_len.current_len()), element);
+                    // Since the loop executes user code which can panic we have to update
+                    // the length every step to correctly drop what we've written.
+                    // NB can't overflow since we would have had to alloc the address space
+                    local_len.increment_len(1);
+                });
+            }
+            Ok(())
+        } else {
+            Err(TryReserveErrorKind::CapacityOverflow.into())
+        }
+    }
+
     /// Creates a splicing iterator that replaces the specified range in the vector
     /// with the given `replace_with` iterator and yields the removed items.
     /// `replace_with` does not need to be the same length as `range`.
@@ -3324,7 +3349,7 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
     }
 }
 
-/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
     #[inline]
@@ -3336,7 +3361,7 @@ impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
 
-/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
     #[inline]
@@ -3456,10 +3481,7 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> {
     /// ```
     #[cfg(not(test))]
     fn from(s: [T; N]) -> Vec<T> {
-        <[T]>::into_vec(
-            #[rustc_box]
-            Box::new(s),
-        )
+        <[T]>::into_vec(Box::new(s))
     }
 
     #[cfg(test)]
@@ -3492,7 +3514,7 @@ where
     }
 }
 
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
 #[cfg(not(test))]
 #[stable(feature = "vec_from_box", since = "1.18.0")]
 impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
@@ -3510,7 +3532,7 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
     }
 }
 
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
 #[cfg(not(no_global_oom_handling))]
 #[cfg(not(test))]
 #[stable(feature = "box_from_vec", since = "1.20.0")]
 impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
@@ -3525,6 +3547,14 @@ impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
     /// ```
     /// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
     /// ```
+    ///
+    /// Any excess capacity is removed:
+    /// ```
+    /// let mut vec = Vec::with_capacity(10);
+    /// vec.extend([1, 2, 3]);
+    ///
+    /// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
+    /// ```
     fn from(v: Vec<T, A>) -> Self {
         v.into_boxed_slice()
     }
diff --git a/rust/alloc/vec/set_len_on_drop.rs b/rust/alloc/vec/set_len_on_drop.rs
index 448bf5076a0bf3..d3c7297b80ec0f 100644
--- a/rust/alloc/vec/set_len_on_drop.rs
+++ b/rust/alloc/vec/set_len_on_drop.rs
@@ -20,6 +20,11 @@ impl<'a> SetLenOnDrop<'a> {
     pub(super) fn increment_len(&mut self, increment: usize) {
         self.local_len += increment;
     }
+
+    #[inline]
+    pub(super) fn current_len(&self) -> usize {
+        self.local_len
+    }
 }
 
 impl Drop for SetLenOnDrop<'_> {
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
index 9621120fc1c949..a6a735201e59bb 100644
--- a/rust/alloc/vec/spec_extend.rs
+++ b/rust/alloc/vec/spec_extend.rs
@@ -1,12 +1,11 @@
 // SPDX-License-Identifier: Apache-2.0 OR MIT
 
 use crate::alloc::Allocator;
-use crate::collections::{TryReserveError, TryReserveErrorKind};
+use crate::collections::TryReserveError;
 use core::iter::TrustedLen;
-use core::ptr::{self};
 use core::slice::{self};
 
-use super::{IntoIter, SetLenOnDrop, Vec};
+use super::{IntoIter, Vec};
 
 // Specialization trait used for Vec::extend
 #[cfg(not(no_global_oom_handling))]
@@ -44,36 +43,7 @@ where
     I: TrustedLen<Item = T>,
 {
     default fn spec_extend(&mut self, iterator: I) {
-        // This is the case for a TrustedLen iterator.
-        let (low, high) = iterator.size_hint();
-        if let Some(additional) = high {
-            debug_assert_eq!(
-                low,
-                additional,
-                "TrustedLen iterator's size hint is not exact: {:?}",
-                (low, high)
-            );
-            self.reserve(additional);
-            unsafe {
-                let mut ptr = self.as_mut_ptr().add(self.len());
-                let mut local_len = SetLenOnDrop::new(&mut self.len);
-                iterator.for_each(move |element| {
-                    ptr::write(ptr, element);
-                    ptr = ptr.add(1);
-                    // Since the loop executes user code which can panic we have to bump the pointer
-                    // after each step.
-                    // NB can't overflow since we would have had to alloc the address space
-                    local_len.increment_len(1);
-                });
-            }
-        } else {
-            // Per TrustedLen contract a `None` upper bound means that the iterator length
-            // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
-            // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
-            // This avoids additional codegen for a fallback code path which would eventually
-            // panic anyway.
-            panic!("capacity overflow");
-        }
+        self.extend_trusted(iterator)
     }
 }
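For context (not part of the patch): after this change both the panicking and the fallible extend specializations funnel into the shared helpers added in mod.rs; from the caller's side nothing moves:

```rust
fn main() {
    let mut v = vec![1u32, 2, 3];
    v.extend(4..=6); // `RangeInclusive` is `TrustedLen`: spec_extend -> extend_trusted
    assert_eq!(v, [1, 2, 3, 4, 5, 6]);

    // The fallible sibling path reports overflow instead of panicking.
    let mut w: Vec<u8> = Vec::new();
    assert!(w.try_reserve(usize::MAX).is_err());
}
```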
- panic!("capacity overflow"); - } + self.extend_trusted(iterator) } } @@ -82,32 +52,7 @@ where I: TrustedLen, { default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> { - // This is the case for a TrustedLen iterator. - let (low, high) = iterator.size_hint(); - if let Some(additional) = high { - debug_assert_eq!( - low, - additional, - "TrustedLen iterator's size hint is not exact: {:?}", - (low, high) - ); - self.try_reserve(additional)?; - unsafe { - let mut ptr = self.as_mut_ptr().add(self.len()); - let mut local_len = SetLenOnDrop::new(&mut self.len); - iterator.for_each(move |element| { - ptr::write(ptr, element); - ptr = ptr.add(1); - // Since the loop executes user code which can panic we have to bump the pointer - // after each step. - // NB can't overflow since we would have had to alloc the address space - local_len.increment_len(1); - }); - } - Ok(()) - } else { - Err(TryReserveErrorKind::CapacityOverflow.into()) - } + self.try_extend_trusted(iterator) } } diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs index 397a3dd57a9b13..280676993074c1 100644 --- a/rust/kernel/allocator.rs +++ b/rust/kernel/allocator.rs @@ -31,16 +31,19 @@ static ALLOCATOR: KernelAllocator = KernelAllocator; // let's generate them ourselves instead. // // Note that `#[no_mangle]` implies exported too, nowadays. +#[allow(clippy::no_mangle_with_rust_abi)] #[no_mangle] fn __rust_alloc(size: usize, _align: usize) -> *mut u8 { unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 } } +#[allow(clippy::no_mangle_with_rust_abi)] #[no_mangle] fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) { unsafe { bindings::kfree(ptr as *const core::ffi::c_void) }; } +#[allow(clippy::no_mangle_with_rust_abi)] #[no_mangle] fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 { unsafe { @@ -52,6 +55,7 @@ fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize } } +#[allow(clippy::no_mangle_with_rust_abi)] #[no_mangle] fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 { unsafe { diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 94d67252df4e0c..b05809ad1f7269 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -309,7 +309,7 @@ quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@ command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) $@ $(obj)/%.rsi: $(src)/%.rs FORCE - $(call if_changed_dep,rustc_rsi_rs) + $(call if_changed,rustc_rsi_rs) quiet_cmd_rustc_s_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@ cmd_rustc_s_rs = $(rust_common_cmd) --emit=asm=$@ $< diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh index a69f29184c8ad8..c55875107673c2 100755 --- a/scripts/min-tool-version.sh +++ b/scripts/min-tool-version.sh @@ -27,7 +27,7 @@ llvm) fi ;; rustc) - echo 1.66.0 + echo 1.70.0 ;; bindgen) echo 0.56.0