alloc/raw_vec/mod.rs
#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
#![cfg_attr(test, allow(dead_code))]

// Note: This module is also included in the alloctests crate using #[path] to
// run the tests. See the comment there for an explanation why this is the case.

use core::marker::PhantomData;
use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, Alignment, NonNull, Unique};
use core::{cmp, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;

#[cfg(test)]
mod tests;

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
#[cfg_attr(not(panic = "immediate-abort"), inline(never))]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}

enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    #[cfg(not(no_global_oom_handling))]
    /// The new memory is guaranteed to be zeroed.
    Zeroed,
}

type Cap = core::num::niche_types::UsizeNoHighBit;

const ZERO_CAP: Cap = unsafe { Cap::new_unchecked(0) };

/// `Cap(cap)`, except if `T` is a ZST then `ZERO_CAP`.
///
/// # Safety
///
/// `cap` must be <= `isize::MAX`.
unsafe fn new_cap<T>(cap: usize) -> Cap {
    if T::IS_ZST { ZERO_CAP } else { unsafe { Cap::new_unchecked(cap) } }
}

/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than `isize::MAX` bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
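///
/// A minimal usage sketch (illustrative only; the `len` bookkeeping and the element
/// writes below are the caller's responsibility, `RawVec` never looks at them):
///
/// ```ignore (internal-api sketch)
/// let mut buf: RawVec<u32> = RawVec::new();
/// let mut len = 0;
/// buf.reserve(len, 1); // allocates (or grows) as needed
/// unsafe { buf.ptr().add(len).write(7) };
/// len += 1;
/// // Dropping `buf` frees the allocation but does not drop the `u32`s.
/// ```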
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
    inner: RawVecInner<A>,
    _marker: PhantomData<T>,
}

/// Like a `RawVec`, but only generic over the allocator, not the type.
///
/// As such, all the methods need the layout passed-in as a parameter.
///
/// Having this separation reduces the amount of code we need to monomorphize,
/// as most operations don't need the actual type, just its layout.
#[allow(missing_debug_implementations)]
struct RawVecInner<A: Allocator = Global> {
    ptr: Unique<u8>,
    /// Never used for ZSTs; it's `capacity()`'s responsibility to return `usize::MAX` in that case.
    ///
    /// # Safety
    ///
    /// `cap` must be in the `0..=isize::MAX` range.
    cap: Cap,
    alloc: A,
}

impl<T> RawVec<T, Global> {
    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    #[must_use]
    pub(crate) const fn new() -> Self {
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// Non-fallible version of `try_with_capacity`.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub(crate) fn with_capacity(capacity: usize) -> Self {
        Self { inner: RawVecInner::with_capacity(capacity, T::LAYOUT), _marker: PhantomData }
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub(crate) fn with_capacity_zeroed(capacity: usize) -> Self {
        Self {
            inner: RawVecInner::with_capacity_zeroed_in(capacity, Global, T::LAYOUT),
            _marker: PhantomData,
        }
    }
}

impl RawVecInner<Global> {
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    fn with_capacity(capacity: usize, elem_layout: Layout) -> Self {
        match Self::try_allocate_in(capacity, AllocInit::Uninitialized, Global, elem_layout) {
            Ok(res) => res,
            Err(err) => handle_error(err),
        }
    }
}

// Tiny Vecs are dumb. Skip to:
// - 8 if the element size is 1, because any heap allocator is likely
//   to round up a request of less than 8 bytes to at least 8 bytes.
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
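//
// For example, with these thresholds: `min_non_zero_cap(1) == 8` (e.g. `u8`),
// `min_non_zero_cap(4) == 4` (e.g. `u32`), and `min_non_zero_cap(2048) == 1`.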
const fn min_non_zero_cap(size: usize) -> usize {
    if size == 1 {
        8
    } else if size <= 1024 {
        4
    } else {
        1
    }
}

impl<T, A: Allocator> RawVec<T, A> {
    #[cfg(not(no_global_oom_handling))]
    pub(crate) const MIN_NON_ZERO_CAP: usize = min_non_zero_cap(size_of::<T>());

    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[inline]
    pub(crate) const fn new_in(alloc: A) -> Self {
        // Check assumption made in `current_memory`
        const { assert!(T::LAYOUT.size() % T::LAYOUT.align() == 0) };
        Self { inner: RawVecInner::new_in(alloc, Alignment::of::<T>()), _marker: PhantomData }
    }

    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub(crate) fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self {
            inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT),
            _marker: PhantomData,
        }
    }

    /// Like `try_with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[inline]
    pub(crate) fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        match RawVecInner::try_with_capacity_in(capacity, alloc, T::LAYOUT) {
            Ok(inner) => Ok(Self { inner, _marker: PhantomData }),
            Err(e) => Err(e),
        }
    }

    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub(crate) fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self {
            inner: RawVecInner::with_capacity_zeroed_in(capacity, alloc, T::LAYOUT),
            _marker: PhantomData,
        }
    }

    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
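    ///
    /// A minimal sketch of the intended call pattern (illustrative only):
    ///
    /// ```ignore (internal-api sketch)
    /// let buf: RawVec<u8> = RawVec::with_capacity(32);
    /// // SAFETY: 32 is both the most recently requested capacity and <= `buf.capacity()`.
    /// let slice: Box<[MaybeUninit<u8>]> = unsafe { buf.into_box(32) };
    /// assert_eq!(slice.len(), 32);
    /// ```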
    pub(crate) unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );

        let me = ManuallyDrop::new(self);
        unsafe {
            let slice = ptr::slice_from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            Box::from_raw_in(slice, ptr::read(&me.inner.alloc))
        }
    }

    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
    /// systems). For ZSTs, capacity is ignored.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
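    ///
    /// A round-trip sketch of the intended use (illustrative only):
    ///
    /// ```ignore (internal-api sketch)
    /// let buf: RawVec<u32> = RawVec::with_capacity(8);
    /// let (ptr, cap) = (buf.ptr(), buf.capacity());
    /// core::mem::forget(buf);
    /// // SAFETY: `ptr` and `cap` come from a `RawVec` created with the same allocator.
    /// let buf = unsafe { RawVec::from_raw_parts_in(ptr, cap, Global) };
    /// ```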
    #[inline]
    pub(crate) unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        // SAFETY: Precondition passed to the caller
        unsafe {
            let ptr = ptr.cast();
            let capacity = new_cap::<T>(capacity);
            Self {
                inner: RawVecInner::from_raw_parts_in(ptr, capacity, alloc),
                _marker: PhantomData,
            }
        }
    }

    /// A convenience method for hoisting the non-null precondition out of [`RawVec::from_raw_parts_in`].
    ///
    /// # Safety
    ///
    /// See [`RawVec::from_raw_parts_in`].
    #[inline]
    pub(crate) unsafe fn from_nonnull_in(ptr: NonNull<T>, capacity: usize, alloc: A) -> Self {
        // SAFETY: Precondition passed to the caller
        unsafe {
            let ptr = ptr.cast();
            let capacity = new_cap::<T>(capacity);
            Self { inner: RawVecInner::from_nonnull_in(ptr, capacity, alloc), _marker: PhantomData }
        }
    }

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline]
    pub(crate) const fn ptr(&self) -> *mut T {
        self.inner.ptr()
    }

    #[inline]
    pub(crate) const fn non_null(&self) -> NonNull<T> {
        self.inner.non_null()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline]
    pub(crate) const fn capacity(&self) -> usize {
        self.inner.capacity(size_of::<T>())
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    #[inline]
    pub(crate) fn allocator(&self) -> &A {
        self.inner.allocator()
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
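    ///
    /// A calling-convention sketch (illustrative only; `buf`, `len`, and `items` are
    /// caller-side state, and `len` is the number of initialized elements, which
    /// `RawVec` itself does not track):
    ///
    /// ```ignore (internal-api sketch)
    /// buf.reserve(len, items.len());
    /// for item in items {
    ///     unsafe { buf.ptr().add(len).write(item) };
    ///     len += 1;
    /// }
    /// ```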
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub(crate) fn reserve(&mut self, len: usize, additional: usize) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.reserve(len, additional, T::LAYOUT) }
    }

    /// A specialized version of `self.reserve(len, 1)` which requires the
    /// caller to ensure `len == self.capacity()`.
    #[cfg(not(no_global_oom_handling))]
    #[inline(never)]
    pub(crate) fn grow_one(&mut self) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.grow_one(T::LAYOUT) }
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub(crate) fn try_reserve(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.try_reserve(len, additional, T::LAYOUT) }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub(crate) fn reserve_exact(&mut self, len: usize, additional: usize) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.reserve_exact(len, additional, T::LAYOUT) }
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub(crate) fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.try_reserve_exact(len, additional, T::LAYOUT) }
    }

    /// Shrinks the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub(crate) fn shrink_to_fit(&mut self, cap: usize) {
        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
        unsafe { self.inner.shrink_to_fit(cap, T::LAYOUT) }
    }
}

unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    fn drop(&mut self) {
        // SAFETY: We are in a Drop impl, self.inner will not be used again.
        unsafe { self.inner.deallocate(T::LAYOUT) }
    }
}

impl<A: Allocator> RawVecInner<A> {
    #[inline]
    const fn new_in(alloc: A, align: Alignment) -> Self {
        let ptr = Unique::from_non_null(NonNull::without_provenance(align.as_nonzero()));
        // `cap: 0` means "unallocated". Zero-sized types are ignored.
        Self { ptr, cap: ZERO_CAP, alloc }
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline]
    fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
        match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout) {
            Ok(this) => {
                unsafe {
                    // Make it more obvious that a subsequent Vec::reserve(capacity) will not allocate.
                    hint::assert_unchecked(!this.needs_to_grow(0, capacity, elem_layout));
                }
                this
            }
            Err(err) => handle_error(err),
        }
    }

    #[inline]
    fn try_with_capacity_in(
        capacity: usize,
        alloc: A,
        elem_layout: Layout,
    ) -> Result<Self, TryReserveError> {
        Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout)
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline]
    fn with_capacity_zeroed_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
        match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc, elem_layout) {
            Ok(res) => res,
            Err(err) => handle_error(err),
        }
    }

    fn try_allocate_in(
        capacity: usize,
        init: AllocInit,
        alloc: A,
        elem_layout: Layout,
    ) -> Result<Self, TryReserveError> {
        // We avoid `unwrap_or_else` here because it bloats the amount of
        // LLVM IR generated.
        let layout = match layout_array(capacity, elem_layout) {
            Ok(layout) => layout,
            Err(_) => return Err(CapacityOverflow.into()),
        };

        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
        if layout.size() == 0 {
            return Ok(Self::new_in(alloc, elem_layout.alignment()));
        }

        let result = match init {
            AllocInit::Uninitialized => alloc.allocate(layout),
            #[cfg(not(no_global_oom_handling))]
            AllocInit::Zeroed => alloc.allocate_zeroed(layout),
        };
        let ptr = match result {
            Ok(ptr) => ptr,
            Err(_) => return Err(AllocError { layout, non_exhaustive: () }.into()),
        };

        // Allocators currently return a `NonNull<[u8]>` whose length
        // matches the size requested. If that ever changes, the capacity
        // here should change to `ptr.len() / size_of::<T>()`.
        Ok(Self {
            ptr: Unique::from(ptr.cast()),
            cap: unsafe { Cap::new_unchecked(capacity) },
            alloc,
        })
    }

    #[inline]
    unsafe fn from_raw_parts_in(ptr: *mut u8, cap: Cap, alloc: A) -> Self {
        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
    }

    #[inline]
    unsafe fn from_nonnull_in(ptr: NonNull<u8>, cap: Cap, alloc: A) -> Self {
        Self { ptr: Unique::from(ptr), cap, alloc }
    }

    #[inline]
    const fn ptr<T>(&self) -> *mut T {
        self.non_null::<T>().as_ptr()
    }

    #[inline]
    const fn non_null<T>(&self) -> NonNull<T> {
        self.ptr.cast().as_non_null_ptr()
    }

    #[inline]
    const fn capacity(&self, elem_size: usize) -> usize {
        if elem_size == 0 { usize::MAX } else { self.cap.as_inner() }
    }

    #[inline]
    fn allocator(&self) -> &A {
        &self.alloc
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[inline]
    unsafe fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
        if elem_layout.size() == 0 || self.cap.as_inner() == 0 {
            None
        } else {
            // We could use Layout::array here which ensures the absence of isize and usize overflows
            // and could hypothetically handle differences between stride and size, but this memory
            // has already been allocated so we know it can't overflow and currently Rust does not
            // support such types. So we can do better by skipping some checks and avoiding an unwrap.
            unsafe {
                let alloc_size = elem_layout.size().unchecked_mul(self.cap.as_inner());
                let layout = Layout::from_size_align_unchecked(alloc_size, elem_layout.align());
                Some((self.ptr.into(), layout))
            }
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    unsafe fn reserve(&mut self, len: usize, additional: usize, elem_layout: Layout) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        #[cold]
        unsafe fn do_reserve_and_handle<A: Allocator>(
            slf: &mut RawVecInner<A>,
            len: usize,
            additional: usize,
            elem_layout: Layout,
        ) {
            // SAFETY: Precondition passed to caller
            if let Err(err) = unsafe { slf.grow_amortized(len, additional, elem_layout) } {
                handle_error(err);
            }
        }

        if self.needs_to_grow(len, additional, elem_layout) {
            unsafe {
                do_reserve_and_handle(self, len, additional, elem_layout);
            }
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    unsafe fn grow_one(&mut self, elem_layout: Layout) {
        // SAFETY: Precondition passed to caller
        if let Err(err) = unsafe { self.grow_amortized(self.cap.as_inner(), 1, elem_layout) } {
            handle_error(err);
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    unsafe fn try_reserve(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional, elem_layout) {
            // SAFETY: Precondition passed to caller
            unsafe {
                self.grow_amortized(len, additional, elem_layout)?;
            }
        }
        unsafe {
            // Inform the optimizer that the reservation has succeeded or wasn't needed
            hint::assert_unchecked(!self.needs_to_grow(len, additional, elem_layout));
        }
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    #[cfg(not(no_global_oom_handling))]
    unsafe fn reserve_exact(&mut self, len: usize, additional: usize, elem_layout: Layout) {
        // SAFETY: Precondition passed to caller
        if let Err(err) = unsafe { self.try_reserve_exact(len, additional, elem_layout) } {
            handle_error(err);
        }
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    unsafe fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional, elem_layout) {
            // SAFETY: Precondition passed to caller
            unsafe {
                self.grow_exact(len, additional, elem_layout)?;
            }
        }
        unsafe {
            // Inform the optimizer that the reservation has succeeded or wasn't needed
            hint::assert_unchecked(!self.needs_to_grow(len, additional, elem_layout));
        }
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - `cap` must be less than or equal to `self.capacity(elem_layout.size())`
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    unsafe fn shrink_to_fit(&mut self, cap: usize, elem_layout: Layout) {
        if let Err(err) = unsafe { self.shrink(cap, elem_layout) } {
            handle_error(err);
        }
    }

    #[inline]
    fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool {
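        // If `len` exceeds the current capacity, the wrapping subtraction yields a huge
        // value, the comparison is false, and we skip growing; this matches the documented
        // caveat on `reserve` that an over-large `len` may simply not allocate.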
        additional > self.capacity(elem_layout.size()).wrapping_sub(len)
    }

    #[inline]
    unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
        // Allocators currently return a `NonNull<[u8]>` whose length matches
        // the size requested. If that ever changes, the capacity here should
        // change to `ptr.len() / size_of::<T>()`.
        self.ptr = Unique::from(ptr.cast());
        self.cap = unsafe { Cap::new_unchecked(cap) };
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - The sum of `len` and `additional` must be greater than or equal to
    ///   `self.capacity(elem_layout.size())`
    unsafe fn grow_amortized(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);

        if elem_layout.size() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap.as_inner() * 2, required_cap);
        let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);
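        // For example, with 4-byte elements, `cap == 4`, `len == 4`, and `additional == 1`:
        // `required_cap` is 5, the doubled capacity is 8, and `min_non_zero_cap(4)` is 4,
        // so the new capacity is 8.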

        let new_layout = layout_array(cap, elem_layout)?;

        // SAFETY:
        // - For the `current_memory` call: Precondition passed to caller
        // - For the `finish_grow` call: Precondition passed to caller
        //   + `current_memory` does the right thing
        let ptr =
            unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? };

        // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
        unsafe { self.set_ptr_and_cap(ptr, cap) };
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - The sum of `len` and `additional` must be greater than or equal to
    ///   `self.capacity(elem_layout.size())`
    unsafe fn grow_exact(
        &mut self,
        len: usize,
        additional: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        if elem_layout.size() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = layout_array(cap, elem_layout)?;

        // SAFETY:
        // - For the `current_memory` call: Precondition passed to caller
        // - For the `finish_grow` call: Precondition passed to caller
        //   + `current_memory` does the right thing
        let ptr =
            unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? };
        // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
        unsafe {
            self.set_ptr_and_cap(ptr, cap);
        }
        Ok(())
    }

    /// # Safety
    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
    ///   initially construct `self`
    /// - `elem_layout`'s size must be a multiple of its alignment
    /// - `cap` must be less than or equal to `self.capacity(elem_layout.size())`
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    unsafe fn shrink(&mut self, cap: usize, elem_layout: Layout) -> Result<(), TryReserveError> {
        assert!(cap <= self.capacity(elem_layout.size()), "Tried to shrink to a larger capacity");
        // SAFETY: Just checked this isn't trying to grow
        unsafe { self.shrink_unchecked(cap, elem_layout) }
    }

    /// `shrink`, but without the capacity check.
    ///
    /// This is split out so that `shrink` can inline the check, since it
    /// optimizes out in things like `shrink_to_fit`, without needing to
    /// also inline all this code, as doing that ends up failing the
    /// `vec-shrink-panic` codegen test when `shrink_to_fit` ends up being too
    /// big for LLVM to be willing to inline.
    ///
    /// # Safety
    /// `cap <= self.capacity()`
    #[cfg(not(no_global_oom_handling))]
    unsafe fn shrink_unchecked(
        &mut self,
        cap: usize,
        elem_layout: Layout,
    ) -> Result<(), TryReserveError> {
        // SAFETY: Precondition passed to caller
        let (ptr, layout) = if let Some(mem) = unsafe { self.current_memory(elem_layout) } {
            mem
        } else {
            return Ok(());
        };

        // If shrinking to 0, deallocate the buffer. We don't reach this point
        // for the T::IS_ZST case since current_memory() will have returned
        // None.
        if cap == 0 {
            unsafe { self.alloc.deallocate(ptr, layout) };
            self.ptr =
                unsafe { Unique::new_unchecked(ptr::without_provenance_mut(elem_layout.align())) };
            self.cap = ZERO_CAP;
        } else {
            let ptr = unsafe {
                // Layout cannot overflow here because it would have
                // overflowed earlier when capacity was larger.
                let new_size = elem_layout.size().unchecked_mul(cap);
                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
                self.alloc
                    .shrink(ptr, layout, new_layout)
                    .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
            };
            // SAFETY: if the allocation is valid, then the capacity is too
            unsafe {
                self.set_ptr_and_cap(ptr, cap);
            }
        }
        Ok(())
    }

    /// # Safety
    ///
    /// This function deallocates the owned allocation, but does not update `ptr` or `cap` to
    /// prevent double-free or use-after-free. Essentially, do not do anything with `self`
    /// after this function returns.
    /// Ideally this function would take `self` by move, but it cannot because it exists to be
    /// called from a `Drop` impl.
    unsafe fn deallocate(&mut self, elem_layout: Layout) {
        // SAFETY: Precondition passed to caller
        if let Some((ptr, layout)) = unsafe { self.current_memory(elem_layout) } {
            unsafe {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}

/// # Safety
/// If `current_memory` matches `Some((ptr, old_layout))`:
/// - `ptr` must denote a block of memory *currently allocated* via `alloc`
/// - `old_layout` must *fit* that block of memory
/// - `new_layout` must have the same alignment as `old_layout`
/// - `new_layout.size()` must be greater than or equal to `old_layout.size()`
/// If `current_memory` is `None`, this function is safe.
// not marked inline(never) since we want optimizers to be able to observe the specifics of this
// function, see tests/codegen-llvm/vec-reserve-extend.rs.
#[cold]
unsafe fn finish_grow<A>(
    new_layout: Layout,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality
            hint::assert_unchecked(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        alloc.allocate(new_layout)
    };

    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}

// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[cold]
#[optimize(size)]
fn handle_error(e: TryReserveError) -> ! {
    match e.kind() {
        CapacityOverflow => capacity_overflow(),
        AllocError { layout, .. } => handle_alloc_error(layout),
    }
}

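// For example, a 4-byte, 4-aligned element repeated 10 times yields a 40-byte layout;
// any arithmetic overflow inside `Layout::repeat` is reported as `CapacityOverflow`.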
867fn layout_array(cap: usize, elem_layout: Layout) -> Result<Layout, TryReserveError> {
868 elem_layout.repeat(cap).map(|(layout, _pad)| layout).map_err(|_| CapacityOverflow.into())
869}