// hashbrown/raw/mod.rs

use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::control::{BitMaskIter, Group, Tag, TagSliceExt};
use crate::scopeguard::{guard, ScopeGuard};
use crate::util::{invalid_mut, likely, unlikely};
use crate::TryReserveError;
use core::array;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use core::slice;
use core::{hint, ptr};

mod alloc;
#[cfg(test)]
pub(crate) use self::alloc::AllocError;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};

#[inline]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}

/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}

trait SizedTypeProperties: Sized {
    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
}

impl<T> SizedTypeProperties for T {}

/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
#[derive(Clone)]
struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );

        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
}
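
// Editorial sketch (not part of hashbrown): a standalone check of the
// triangular-probe property for a hypothetical table of 8 groups. Starting
// from any position, strides of 1, 2, 3, ... visit every group exactly once
// before repeating. The real code strides in units of `Group::WIDTH`, but the
// modular argument is the same.
#[cfg(test)]
#[test]
fn probe_seq_visits_every_group_sketch() {
    let groups = 8usize; // must be a power of two
    let mask = groups - 1;
    let (mut pos, mut stride) = (5usize, 0usize); // arbitrary start position
    let mut seen = [false; 8];
    for _ in 0..groups {
        assert!(!seen[pos], "group visited twice");
        seen[pos] = true;
        stride += 1;
        pos = (pos + stride) & mask;
    }
    assert!(seen.iter().all(|&v| v), "every group visited exactly once");
}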

/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize, table_layout: TableLayout) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    if cap < 15 {
        // Consider a small TableLayout like { size: 1, ctrl_align: 16 } on a
        // platform with Group::WIDTH of 16 (like x86_64 with SSE2). For small
        // bucket sizes, this ends up wasting quite a few bytes just to pad to
        // the relatively larger ctrl_align:
        //
        // | capacity | buckets | bytes allocated | bytes per item |
        // | -------- | ------- | --------------- | -------------- |
        // |        3 |       4 |              36 | (Yikes!)  12.0 |
        // |        7 |       8 |              40 | (Poor)     5.7 |
        // |       14 |      16 |              48 |            3.4 |
        // |       28 |      32 |              80 |            3.3 |
        //
        // In general, buckets * table_layout.size >= table_layout.ctrl_align
        // must be true to avoid these edges. This is implemented by adjusting
        // the minimum capacity upwards for small items. This code only needs
        // to handle ctrl_align values that are less than or equal to Group::WIDTH,
        // because valid layout sizes are always a multiple of the alignment,
        // so anything with an alignment over Group::WIDTH won't hit this edge
        // case.

        // This is brittle, e.g. if we ever add 32 byte groups, it will select
        // 3 regardless of the table_layout.size.
        let min_cap = match (Group::WIDTH, table_layout.size) {
            (16, 0..=1) => 14,
            (16, 2..=3) => 7,
            (8, 0..=1) => 7,
            _ => 3,
        };
        let cap = min_cap.max(cap);
        // We don't bother with a table size of 2 buckets since that can only
        // hold a single element. Instead, we skip directly to a 4 bucket table
        // which can hold 3 elements.
        return Some(if cap < 4 {
            4
        } else if cap < 8 {
            8
        } else {
            16
        });
    }

    // Otherwise require 1/8 buckets to be empty (87.5% load)
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}
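
// Editorial sketch (not part of hashbrown): the large-table branch above in
// isolation. The capacity is padded so that at most 7/8 of the buckets are
// full, then rounded up to the next power of two.
#[cfg(test)]
#[test]
fn capacity_to_buckets_arithmetic_sketch() {
    let buckets_for = |cap: usize| (cap.checked_mul(8).unwrap() / 7).next_power_of_two();
    assert_eq!(buckets_for(15), 32); // 15 * 8 / 7 = 17, rounded up to 32
    assert_eq!(buckets_for(28), 32); // 28 * 8 / 7 = 32, already a power of two
    assert_eq!(buckets_for(29), 64); // 29 * 8 / 7 = 33, rounded up to 64
}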

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
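
// Editorial sketch (not part of hashbrown): the two helpers above are
// consistent with each other -- a table sized by `capacity_to_buckets(cap, _)`
// can hold at least `cap` items under the 7/8 load factor.
#[cfg(test)]
#[test]
fn bucket_mask_to_capacity_sketch() {
    assert_eq!(bucket_mask_to_capacity(3), 3); // 4 buckets keep one slot empty
    assert_eq!(bucket_mask_to_capacity(15), 14); // 16 buckets hold 14 items
    assert_eq!(bucket_mask_to_capacity(31), 28); // 32 buckets hold 28 items
    for cap in [3, 14, 28, 100, 1000] {
        let buckets = capacity_to_buckets(cap, TableLayout::new::<u64>()).unwrap();
        assert!(bucket_mask_to_capacity(buckets - 1) >= cap);
    }
}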

/// Helper which allows the max calculation for `ctrl_align` to be statically computed for each `T`
/// while keeping the rest of `calculate_layout_for` independent of `T`
#[derive(Copy, Clone)]
struct TableLayout {
    size: usize,
    ctrl_align: usize,
}

impl TableLayout {
    #[inline]
    const fn new<T>() -> Self {
        let layout = Layout::new::<T>();
        Self {
            size: layout.size(),
            ctrl_align: if layout.align() > Group::WIDTH {
                layout.align()
            } else {
                Group::WIDTH
            },
        }
    }

    #[inline]
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
        debug_assert!(buckets.is_power_of_two());

        let TableLayout { size, ctrl_align } = self;
        // Manual layout calculation since Layout methods are not yet stable.
        let ctrl_offset =
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

        // We need an additional check to ensure that the allocation doesn't
        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
        if len > isize::MAX as usize - (ctrl_align - 1) {
            return None;
        }

        Some((
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
            ctrl_offset,
        ))
    }
}
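
// Editorial sketch (not part of hashbrown): the layout arithmetic above, worked
// out for a hypothetical table of `u64` values with 16 buckets on a target
// where `Group::WIDTH` is 16 (e.g. x86_64 with SSE2). The data array comes
// first, the control bytes start at `ctrl_offset`, and `Group::WIDTH` extra
// control bytes are appended so that group loads near the end of the table
// never read out of bounds.
#[cfg(test)]
#[test]
fn table_layout_arithmetic_sketch() {
    let (size, ctrl_align, group_width, buckets) = (8usize, 16usize, 16usize, 16usize);
    // Round the end of the data array up to the control-byte alignment.
    let ctrl_offset = (size * buckets + (ctrl_align - 1)) & !(ctrl_align - 1);
    let len = ctrl_offset + buckets + group_width;
    assert_eq!(ctrl_offset, 128); // 16 buckets * 8 bytes of data, already aligned
    assert_eq!(len, 160); // 128 data bytes + 16 control bytes + 16 mirror bytes
}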

/// A reference to an empty bucket into which an element can be inserted.
pub struct InsertSlot {
    index: usize,
}

/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually it is a pointer to the next element rather than to the element
    // itself. This is needed to maintain pointer arithmetic invariants;
    // keeping a direct pointer to the element introduces difficulty.
    // Using `NonNull` for variance and niche layout.
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[inline]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    /// Creates a [`Bucket`] that contains a pointer to the data.
    /// The pointer calculation is performed by calculating the
    /// offset from the given `base` pointer (convenience for
    /// `base.as_ptr().sub(index)`).
    ///
    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// If the `T` is a ZST, then we instead track the index of the element
    /// in the table so that `erase` works properly (return
    /// `NonNull::new_unchecked((index + 1) as *mut T)`)
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
    /// rules of [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the `base` pointer must not be `dangling` and must point to the
    ///   end of the first `value element` from the `data part` of the table, i.e.
    ///   must be the pointer that is returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    ///   must be no greater than the number returned by the function
    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    /// must be no greater than the number returned by the function
    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
        // the data part of the table (we start counting from "0", so that
        // in the expression T[last], the "last" index actually is one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
        //
        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
        //                   points here in the data part of the table
        //                   (to the start of T1)
        //                        |
        //                        |        `base: NonNull<T>` must point here
        //                        |         (to the end of T0 or to the start of C0)
        //                        v         v
        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
        //                           ^
        //                           `from_base_index(base, 1)` returns a pointer
        //                           that points here in the data part of the table
        //                           (to the end of T1)
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes
        // or metadata for data.
        let ptr = if T::IS_ZERO_SIZED {
            // won't overflow because index must be less than length (bucket_mask)
            // and bucket_mask is guaranteed to be less than `isize::MAX`
            // (see TableLayout::calculate_layout_for method)
            invalid_mut(index + 1)
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
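
    // Editorial sketch (not part of hashbrown, never called by the crate): the
    // reversed data layout in miniature, using a plain array in place of the
    // table's allocation. With `base` pointing one past the element adjacent to
    // the control bytes (the role played by `RawTable::data_end`), bucket `i`'s
    // one-past-the-element pointer is `base - i` and the element itself starts
    // at `base - i - 1`, exactly as in `from_base_index` and `as_ptr`.
    #[cfg(test)]
    #[allow(dead_code)]
    fn sketch_reversed_bucket_pointer_arithmetic() {
        let data = [40u32, 30, 20, 10]; // stored as T3, T2, T1, T0
        let buckets = data.len();
        unsafe {
            let base = data.as_ptr().add(buckets); // plays the role of `data_end()`
            for index in 0..buckets {
                // `from_base_index(base, index)` keeps `base.sub(index)`; as in
                // `as_ptr`, subtracting one more reaches the start of T{index}.
                let elem = base.sub(index).sub(1);
                assert_eq!(*elem, (10 * (index + 1)) as u32);
            }
        }
    }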

    /// Calculates the index of a [`Bucket`] as the distance between two pointers
    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
    /// The returned value is in units of T: the distance in bytes divided by
    /// [`core::mem::size_of::<T>()`].
    ///
    /// If the `T` is a ZST, then we return the index of the element in
    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
    ///
    /// This function is the inverse of [`from_base_index`].
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
    ///
    /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
    /// method, as well as for the correct logic of the work of this crate, the
    /// following rules are necessary and sufficient:
    ///
    /// * the pointer contained in `base` must not be `dangling` and must point to the
    ///   end of the first `element` from the `data part` of the table, i.e.
    ///   must be the pointer returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `self` also must not contain a dangling pointer;
    ///
    /// * both `self` and `base` must be created from the same [`RawTable`]
    ///   (or [`RawTableInner`]).
    ///
    /// If `mem::size_of::<T>() == 0`, this function is always safe.
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTableInner`]: RawTableInner
    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
    #[inline]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        // If mem::size_of::<T>() != 0 then return an index under which we used to store the
        // `element` in the data part of the table (we start counting from "0", so
        // that in the expression T[last], the "last" index actually is one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
        // For example, for the 5th element in the table the calculation is performed like this:
        //
        //                        mem::size_of::<T>()
        //                          |
        //                          |         `self = from_base_index(base, 5)` that returns pointer
        //                          |         that points here in the data part of the table
        //                          |         (to the end of T5)
        //                          |           |                    `base: NonNull<T>` must point here
        //                          v           |                    (to the end of T0 or to the start of C0)
        //                        /???\         v                      v
        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
        //                                      \__________  __________/
        //                                                 \/
        //                                     `bucket.to_base_index(base)` = 5
        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
        if T::IS_ZERO_SIZED {
            // this cannot be UB
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }

    /// Acquires the underlying raw pointer `*mut T` to `data`.
    ///
    /// # Note
    ///
    /// If `T` is not [`Copy`], do not use `*mut T` methods that can call the
    /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
    /// for properly dropping the data we also need to clear the `data` control bytes. If we
    /// drop the data but do not clear the `data control byte`, it leads to a double drop when
    /// [`RawTable`] goes out of scope.
    ///
    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
    /// will not re-evaluate where the new value should go, meaning the value may become
    /// "lost" if its location does not reflect its state.
    ///
    /// [`RawTable`]: crate::raw::RawTable
    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        if T::IS_ZERO_SIZED {
            // Just return an arbitrary ZST pointer which is properly aligned;
            // an invalid pointer is good enough for a ZST.
            invalid_mut(mem::align_of::<T>())
        } else {
            unsafe { self.ptr.as_ptr().sub(1) }
        }
    }

    /// Acquires the underlying non-null pointer `*mut T` to `data`.
    #[inline]
    fn as_non_null(&self) -> NonNull<T> {
        // SAFETY: `self.ptr` is already a `NonNull`
        unsafe { NonNull::new_unchecked(self.as_ptr()) }
    }

    /// Creates a new [`Bucket`] that is offset from `self` by the given
    /// `offset`. The pointer calculation is performed by calculating the
    /// offset from the `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
    /// This function is used for iterators.
    ///
    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
    /// rules of [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the pointer contained in `self` must not be `dangling`;
    ///
    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if T::IS_ZERO_SIZED {
            // an invalid pointer is good enough for a ZST
            invalid_mut(self.ptr.as_ptr() as usize + offset)
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    /// Executes the destructor (if any) of the pointed-to `data`.
    ///
    /// # Safety
    ///
    /// See [`ptr::drop_in_place`] for safety concerns.
    ///
    /// You should use [`RawTable::erase`] instead of this function,
    /// or be careful with calling this function directly, because for
    /// properly dropping the data we also need to clear the `data` control bytes.
    /// If we drop the data but do not erase the `data control byte`, it leads to
    /// a double drop when [`RawTable`] goes out of scope.
    ///
    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::erase`]: crate::raw::RawTable::erase
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }

    /// Reads the `value` from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// # Safety
    ///
    /// See [`ptr::read`] for safety concerns.
    ///
    /// You should use [`RawTable::remove`] instead of this function,
    /// or be careful with calling this function directly, because the compiler
    /// calls the destructor of the read `value` when it goes out of scope. This
    /// can cause a double drop when [`RawTable`] goes out of scope,
    /// because the `data control byte` has not been erased.
    ///
    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::remove`]: crate::raw::RawTable::remove
    #[inline]
    pub(crate) unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }

    /// Overwrites a memory location with the given `value` without reading
    /// or dropping the old value (like the [`ptr::write`] function).
    ///
    /// # Safety
    ///
    /// See [`ptr::write`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if its location
    /// does not reflect its state.
    ///
    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub(crate) unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }

    /// Returns a shared immutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_ref`] for safety concerns.
    ///
    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
    #[inline]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }

    /// Returns a unique mutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_mut`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if its location
    /// does not reflect its state.
    ///
    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
}

/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator = Global> {
    table: RawTableInner,
    alloc: A,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
struct RawTableInner {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T_n, ..., T1, T0, C0, C1, ...
    //                              ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,
}

impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
    pub const fn new() -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc: Global,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
}
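
// Editorial sketch (not part of hashbrown): construction in practice. `new`
// performs no allocation and reports a capacity of zero, while `with_capacity`
// reserves room for at least the requested number of items.
#[cfg(test)]
#[test]
fn raw_table_construction_sketch() {
    let table: RawTable<(u64, u32)> = RawTable::new();
    assert_eq!(table.capacity(), 0);
    assert!(table.is_empty());

    let table: RawTable<(u64, u32)> = RawTable::with_capacity(10);
    assert!(table.capacity() >= 10);
    assert_eq!(table.len(), 0);
}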

impl<T, A: Allocator> RawTable<T, A> {
    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();

    /// Creates a new empty hash table without allocating any memory, using the
    /// given allocator.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
    pub const fn new_in(alloc: A) -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        Ok(Self {
            table: RawTableInner::new_uninitialized(
                &alloc,
                Self::TABLE_LAYOUT,
                buckets,
                fallibility,
            )?,
            alloc,
            marker: PhantomData,
        })
    }

    /// Allocates a new hash table using the given allocator, with at least enough capacity for
    /// inserting the given number of elements without reallocating.
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self {
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
            alloc,
            marker: PhantomData,
        }
    }

    /// Returns a reference to the underlying allocator.
    #[inline]
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    /// Returns a pointer to one past the last `data` element in the table as viewed from
    /// the start point of the allocation.
    ///
    /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
    /// otherwise using it may result in [`undefined behavior`].
    ///
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    pub fn data_end(&self) -> NonNull<T> {
        //                        `self.table.ctrl.cast()` returns pointer that
        //                        points here (to the end of `T0`)
        //                          ∨
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
        //                           \________  ________/
        //                                    \/
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
        //
        // where: T0...T_n  - our stored data;
        //        CT0...CT_n - control bytes or metadata for `data`.
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
        //                        with loading `Group` bytes from the heap works properly, even if the result
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
        //                        `RawTableInner::set_ctrl` function.
        //
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
        self.table.ctrl.cast()
    }

    /// Returns a pointer to the start of the data table.
    #[inline]
    #[cfg(feature = "nightly")]
    pub unsafe fn data_start(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
    }

    /// Returns the total amount of memory allocated internally by the hash
    /// table, in bytes.
    ///
    /// The returned number is informational only. It is intended to be
    /// primarily used for memory profiling.
    #[inline]
    pub fn allocation_size(&self) -> usize {
        // SAFETY: We use the same `table_layout` that was used to allocate
        // this table.
        unsafe { self.table.allocation_size_or_zero(Self::TABLE_LAYOUT) }
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[inline]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to an element in the table.
    ///
    /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
    /// otherwise using it may result in [`undefined behavior`].
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
    /// following safety rules:
    ///
    /// * The table must already be allocated;
    ///
    /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
    ///   function, i.e. `(index + 1) <= self.buckets()`.
    ///
    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
    /// `(index + 1) <= self.buckets()`.
    ///
    /// [`RawTable::buckets`]: RawTable::buckets
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
        // (we start counting from "0", so that in the expression T[n], the "n" index actually is one less than
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
        //
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
        //                  |
        //                  |               `base = self.data_end()` points here
        //                  |               (to the start of CT0 or to the end of T0)
        //                  v                 v
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
        //                     ^                                              \__________  __________/
        //        `table.bucket(3)` returns a pointer that points                        \/
        //         here in the `data` part of the `RawTable` (to              additional control bytes
        //         the end of T3)                                              `m = Group::WIDTH - 1`
        //
        // where: T0...T_n  - our stored data;
        //        CT0...CT_n - control bytes or metadata for `data`;
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
        //
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
        debug_assert_ne!(self.table.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }

    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        self.table.erase(index);
    }

    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Removes an element from the table, returning it.
    ///
    /// This also returns an `InsertSlot` pointing to the newly free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
        self.erase_no_drop(&item);
        (
            item.read(),
            InsertSlot {
                index: self.bucket_index(&item),
            },
        )
    }

    /// Finds and removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
            None => None,
        }
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        self.table.clear_no_drop();
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        if self.is_empty() {
            // Special case empty table to avoid surprising O(capacity) time.
            return;
        }
        // Ensure that the table is reset even if one of the drops panics
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
        unsafe {
            // SAFETY: ScopeGuard sets to zero the `items` field of the table
            // even in case of panic during the dropping of the elements so
            // that there will be no double drop of the elements.
            self_.table.drop_elements::<T>();
        }
    }

    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.table.items, min_size);
        if min_size == 0 {
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
            unsafe {
                // SAFETY:
                // 1. We call the function only once;
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
                //    and [`TableLayout`] that were used to allocate this table.
                // 3. If any elements' drop function panics, then there will only be a memory leak,
                //    because we have replaced the inner table with a new one.
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
            }
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now, and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size, Self::TABLE_LAYOUT) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.table.items == 0 {
                let new_inner =
                    RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
                let mut old_inner = mem::replace(&mut self.table, new_inner);
                unsafe {
                    // SAFETY:
                    // 1. We call the function only once;
                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
                    //    and [`TableLayout`] that were used to allocate this table.
                    // 3. If any elements' drop function panics, then there will only be a memory leak,
                    //    because we have replaced the inner table with a new one.
                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
                }
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                unsafe {
                    // SAFETY:
                    // 1. We know for sure that `min_size >= self.table.items`.
                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
                    //    we will never expose RawTable::new_uninitialized in a public API.
                    if self
                        .resize(min_size, hasher, Fallibility::Infallible)
                        .is_err()
                    {
                        // SAFETY: The result of calling the `resize` function cannot be an error
                        // because `fallibility == Fallibility::Infallible`.
                        hint::unreachable_unchecked()
                    }
                }
            }
        }
    }

    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if unlikely(additional > self.table.growth_left) {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            unsafe {
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
                if self
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
                    .is_err()
                {
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
                    hint::unreachable_unchecked()
                }
            }
        }
    }

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<(), TryReserveError> {
        if additional > self.table.growth_left {
            // SAFETY: The [`RawTableInner`] must already have properly initialized control
            // bytes since we will never expose RawTable::new_uninitialized in a public API.
            unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
        } else {
            Ok(())
        }
    }

    /// Out-of-line slow path for `reserve` and `try_reserve`.
    ///
    /// # Safety
    ///
    /// The [`RawTableInner`] must have properly initialized control bytes,
    /// otherwise calling this function results in [`undefined behavior`].
    ///
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[cold]
    #[inline(never)]
    unsafe fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        unsafe {
            // SAFETY:
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
            //    [`TableLayout`] that were used to allocate this table.
            // 2. The `drop` function is the actual drop function of the elements stored in
            //    the table.
            // 3. The caller ensures that the control bytes of the `RawTableInner`
            //    are already initialized.
            self.table.reserve_rehash_inner(
                &self.alloc,
                additional,
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
                fallibility,
                Self::TABLE_LAYOUT,
                if T::NEEDS_DROP {
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
                } else {
                    None
                },
            )
        }
    }

    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    ///
    /// # Safety
    ///
    /// The [`RawTableInner`] must have properly initialized control bytes,
    /// otherwise calling this function results in [`undefined behavior`].
    ///
    /// The caller of this function must ensure that `capacity >= self.table.items`,
    /// otherwise:
    ///
    /// * If `self.table.items != 0`, calling this function with `capacity`
    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
    ///
    /// * If `self.table.items > capacity_to_buckets(capacity, Self::TABLE_LAYOUT)`,
    ///   calling this function will never return (it will loop infinitely).
    ///
    /// See [`RawTableInner::find_insert_slot`] for more information.
    ///
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    unsafe fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        // SAFETY:
        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
        //    [`TableLayout`] that were used to allocate this table.
        // 3. The caller ensures that the control bytes of the `RawTableInner`
        //    are already initialized.
        self.table.resize_inner(
            &self.alloc,
            capacity,
            &|table, index| hasher(table.bucket::<T>(index).as_ref()),
            fallibility,
            Self::TABLE_LAYOUT,
        )
    }

    /// Inserts a new element into the table, and returns its raw bucket.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            // SAFETY:
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since
            //    we will never expose `RawTable::new_uninitialized` in a public API.
            //
            // 2. We reserve additional space (if necessary) right after calling this function.
            let mut slot = self.table.find_insert_slot(hash);

            // We can avoid growing the table once we have reached our load factor if we are replacing
            // a tombstone. This works since the number of EMPTY slots does not change in this case.
            //
            // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index
            // in the range `0..=self.buckets()`.
            let old_ctrl = *self.table.ctrl(slot.index);
            if unlikely(self.table.growth_left == 0 && old_ctrl.special_is_empty()) {
                self.reserve(1, hasher);
                // SAFETY: We know for sure that `RawTableInner` has control bytes
                // initialized and that there is extra space in the table.
                slot = self.table.find_insert_slot(hash);
            }

            self.insert_in_slot(hash, slot, value)
        }
    }

    /// Inserts a new element into the table, and returns a mutable reference to it.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
        unsafe { self.insert(hash, value, hasher).as_mut() }
    }
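
    // Editorial sketch (not part of hashbrown, never called by the crate): the
    // calling convention for `insert`/`get`. The caller supplies the hash of
    // the new element and a `hasher` closure that the table uses to rehash
    // existing elements when it grows. The identity "hash" of the key below is
    // purely for illustration, not a real hash function.
    #[cfg(test)]
    #[allow(dead_code)]
    fn sketch_insert_and_get() {
        let hasher = |entry: &(u64, u32)| entry.0;
        let mut table: RawTable<(u64, u32)> = RawTable::new();

        for entry in [(1, 10), (2, 20), (3, 30)] {
            table.insert(hasher(&entry), entry, hasher);
        }

        assert_eq!(table.len(), 3);
        assert_eq!(table.get(2, |entry| entry.0 == 2), Some(&(2, 20)));
        assert!(table.get(4, |entry| entry.0 == 4).is_none());
    }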

    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rustc-internal-api")]
    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
        let bucket = self.table.bucket(index);

        // If we are replacing a DELETED entry then we don't need to update
        // the load counter.
        self.table.growth_left -= old_ctrl.special_is_empty() as usize;

        bucket.write(value);
        self.table.items += 1;
        bucket
    }

    /// Temporarily removes a bucket, applying the given function to the removed
    /// element and optionally putting the returned value back in the same bucket.
    ///
    /// Returns `true` if the bucket still contains an element.
    ///
    /// This does not check if the given bucket is actually occupied.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
    where
        F: FnOnce(T) -> Option<T>,
    {
        let index = self.bucket_index(&bucket);
        let old_ctrl = *self.table.ctrl(index);
        debug_assert!(self.is_bucket_full(index));
        let old_growth_left = self.table.growth_left;
        let item = self.remove(bucket).0;
        if let Some(new_item) = f(item) {
            self.table.growth_left = old_growth_left;
            self.table.set_ctrl(index, old_ctrl);
            self.table.items += 1;
            self.bucket(index).write(new_item);
            true
        } else {
            false
        }
    }

    /// Searches for an element in the table. If the element is not found,
    /// returns `Err` with the position of a slot where an element with the
    /// same hash could be inserted.
    ///
    /// This function may resize the table if additional space is required for
    /// inserting an element.
    #[inline]
    pub fn find_or_find_insert_slot(
        &mut self,
        hash: u64,
        mut eq: impl FnMut(&T) -> bool,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<Bucket<T>, InsertSlot> {
        self.reserve(1, hasher);

        unsafe {
            // SAFETY:
            // 1. We know for sure that there is at least one empty `bucket` in the table.
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
            //    never expose `RawTable::new_uninitialized` in a public API.
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
            match self
                .table
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
            {
                // SAFETY: See explanation above.
                Ok(index) => Ok(self.bucket(index)),
                Err(slot) => Err(slot),
            }
        }
    }
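
    // Editorial sketch (not part of hashbrown, never called by the crate): the
    // usual "upsert" pattern built on `find_or_find_insert_slot` plus
    // `insert_in_slot`. The slot returned by a failed lookup stays valid
    // because the table is not mutated between the two calls. The identity
    // "hash" is again purely for illustration.
    #[cfg(test)]
    #[allow(dead_code)]
    fn sketch_find_or_insert_slot() {
        // Insert the key with a count of 1, or bump the existing count.
        fn bump(table: &mut RawTable<(u64, u32)>, key: u64) {
            let hasher = |entry: &(u64, u32)| entry.0; // illustration-only "hash"
            match table.find_or_find_insert_slot(key, |entry| entry.0 == key, hasher) {
                // Key already present: update the value in place.
                Ok(bucket) => unsafe { bucket.as_mut().1 += 1 },
                // Key absent: fill the slot found during the lookup.
                Err(slot) => unsafe {
                    table.insert_in_slot(key, slot, (key, 1));
                },
            }
        }

        let mut table: RawTable<(u64, u32)> = RawTable::new();
        bump(&mut table, 7);
        bump(&mut table, 7);
        bump(&mut table, 9);
        assert_eq!(table.get(7, |entry| entry.0 == 7), Some(&(7, 2)));
        assert_eq!(table.get(9, |entry| entry.0 == 9), Some(&(9, 1)));
    }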

    /// Inserts a new element into the table in the given slot, and returns its
    /// raw bucket.
    ///
    /// # Safety
    ///
    /// `slot` must point to a slot previously returned by
    /// `find_or_find_insert_slot`, and no mutation of the table must have
    /// occurred since that call.
    #[inline]
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
        let old_ctrl = *self.table.ctrl(slot.index);
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);

        let bucket = self.bucket(slot.index);
        bucket.write(value);
        bucket
    }

    /// Searches for an element in the table.
    #[inline]
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
        unsafe {
            // SAFETY:
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
            //    will never expose `RawTable::new_uninitialized` in a public API.
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
            //    is safe.
            let result = self
                .table
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));

            // Avoid `Option::map` because it bloats LLVM IR.
            match result {
                // SAFETY: See explanation above.
                Some(index) => Some(self.bucket(index)),
                None => None,
            }
        }
    }

    /// Gets a reference to an element in the table.
    #[inline]
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
            None => None,
        }
    }

    /// Gets a mutable reference to an element in the table.
    #[inline]
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
            None => None,
        }
    }
1227
1228    /// Attempts to get mutable references to `N` entries in the table at once.
1229    ///
1230    /// Returns an array of length `N` with the results of each query.
1231    ///
1232    /// At most one mutable reference will be returned to any entry. `None` will be returned if any
1233    /// of the hashes are duplicates. `None` will be returned if the hash is not found.
1234    ///
1235    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1236    /// the `i`th key to be looked up.
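    ///
    /// A minimal sketch with `T = (u32, u64)` (the `hash_one` helper is hypothetical):
    ///
    /// ```ignore
    /// let keys = [1u32, 2u32];
    /// let [a, b] = table.get_many_mut(
    ///     [hash_one(&keys[0]), hash_one(&keys[1])],
    ///     |i, &(k, _)| k == keys[i],
    /// );
    /// if let (Some((_, va)), Some((_, vb))) = (a, b) {
    ///     *va += 1;
    ///     *vb += 1;
    /// }
    /// ```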
1237    pub fn get_many_mut<const N: usize>(
1238        &mut self,
1239        hashes: [u64; N],
1240        eq: impl FnMut(usize, &T) -> bool,
1241    ) -> [Option<&'_ mut T>; N] {
1242        unsafe {
1243            let ptrs = self.get_many_mut_pointers(hashes, eq);
1244
1245            for (i, cur) in ptrs.iter().enumerate() {
1246                if cur.is_some() && ptrs[..i].contains(cur) {
1247                    panic!("duplicate keys found");
1248                }
1249            }
1250            // All buckets are distinct from all previous buckets so we're clear to return the result
1251            // of the lookup.
1252
1253            ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1254        }
1255    }
1256
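    /// Variant of [`RawTable::get_many_mut`] that does not check the looked-up entries
    /// for duplicates.
    ///
    /// # Safety
    ///
    /// The caller must ensure that no two of the `hashes` resolve to the same entry;
    /// otherwise this returns multiple mutable references to the same element, which
    /// is undefined behavior.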
1257    pub unsafe fn get_many_unchecked_mut<const N: usize>(
1258        &mut self,
1259        hashes: [u64; N],
1260        eq: impl FnMut(usize, &T) -> bool,
1261    ) -> [Option<&'_ mut T>; N] {
1262        let ptrs = self.get_many_mut_pointers(hashes, eq);
1263        ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1264    }
1265
1266    unsafe fn get_many_mut_pointers<const N: usize>(
1267        &mut self,
1268        hashes: [u64; N],
1269        mut eq: impl FnMut(usize, &T) -> bool,
1270    ) -> [Option<NonNull<T>>; N] {
1271        array::from_fn(|i| {
1272            self.find(hashes[i], |k| eq(i, k))
1273                .map(|cur| cur.as_non_null())
1274        })
1275    }
1276
1277    /// Returns the number of elements the map can hold without reallocating.
1278    ///
1279    /// This number is a lower bound; the table might be able to hold
1280    /// more, but is guaranteed to be able to hold at least this many.
1281    #[inline]
1282    pub fn capacity(&self) -> usize {
1283        self.table.items + self.table.growth_left
1284    }
1285
1286    /// Returns the number of elements in the table.
1287    #[inline]
1288    pub fn len(&self) -> usize {
1289        self.table.items
1290    }
1291
1292    /// Returns `true` if the table contains no elements.
1293    #[inline]
1294    pub fn is_empty(&self) -> bool {
1295        self.len() == 0
1296    }
1297
1298    /// Returns the number of buckets in the table.
1299    #[inline]
1300    pub fn buckets(&self) -> usize {
1301        self.table.bucket_mask + 1
1302    }
1303
1304    /// Checks whether the bucket at `index` is full.
1305    ///
1306    /// # Safety
1307    ///
1308    /// The caller must ensure `index` is less than the number of buckets.
1309    #[inline]
1310    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1311        self.table.is_bucket_full(index)
1312    }
1313
1314    /// Returns an iterator over every element in the table. It is up to
1315    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1316    /// Because we cannot make the `next` method unsafe on the `RawIter`
1317    /// struct, we have to make the `iter` method unsafe.
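    ///
    /// A minimal sketch of the contract (the table must not be freed or resized while
    /// the iterator is alive):
    ///
    /// ```ignore
    /// let mut live = 0;
    /// unsafe {
    ///     for bucket in table.iter() {
    ///         let _elem = bucket.as_ref();
    ///         live += 1;
    ///     }
    /// }
    /// assert_eq!(live, table.len());
    /// ```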
1318    #[inline]
1319    pub unsafe fn iter(&self) -> RawIter<T> {
1320        // SAFETY:
1321        // 1. The caller must uphold the safety contract for `iter` method.
1322        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323        //    we will never expose RawTable::new_uninitialized in a public API.
1324        self.table.iter()
1325    }
1326
1327    /// Returns an iterator over occupied buckets that could match a given hash.
1328    ///
1329    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1330    /// return items that have a hash value different than the one provided. You
1331    /// should always validate the returned values before using them.
1332    ///
1333    /// It is up to the caller to ensure that the `RawTable` outlives the
1334    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1335    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
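    ///
    /// A minimal sketch of the required validation (only the 7-bit tag is matched, so
    /// the full key must be re-checked):
    ///
    /// ```ignore
    /// unsafe {
    ///     for bucket in table.iter_hash(hash) {
    ///         let (k, v) = bucket.as_ref();
    ///         if *k == key {
    ///             // `v` really belongs to `key`; tag collisions are filtered out here.
    ///         }
    ///     }
    /// }
    /// ```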
1336    #[cfg_attr(feature = "inline-more", inline)]
1337    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1338        RawIterHash::new(self, hash)
1339    }
1340
1341    /// Returns an iterator which removes all elements from the table without
1342    /// freeing the memory.
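    ///
    /// A minimal sketch (each element is moved out; the allocation itself is kept):
    ///
    /// ```ignore
    /// for value in table.drain() {
    ///     // `value` is owned here and is dropped at the end of each iteration.
    /// }
    /// assert!(table.is_empty());
    /// ```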
1343    #[cfg_attr(feature = "inline-more", inline)]
1344    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345        unsafe {
1346            let iter = self.iter();
1347            self.drain_iter_from(iter)
1348        }
1349    }
1350
1351    /// Returns an iterator which removes all elements from the table without
1352    /// freeing the memory.
1353    ///
1354    /// Iteration starts at the provided iterator's current location.
1355    ///
1356    /// It is up to the caller to ensure that the iterator is valid for this
1357    /// `RawTable` and covers all items that remain in the table.
1358    #[cfg_attr(feature = "inline-more", inline)]
1359    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360        debug_assert_eq!(iter.len(), self.len());
1361        RawDrain {
1362            iter,
1363            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364            orig_table: NonNull::from(&mut self.table),
1365            marker: PhantomData,
1366        }
1367    }
1368
1369    /// Returns an iterator which consumes all elements from the table.
1370    ///
1371    /// Iteration starts at the provided iterator's current location.
1372    ///
1373    /// It is up to the caller to ensure that the iterator is valid for this
1374    /// `RawTable` and covers all items that remain in the table.
1375    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1376        debug_assert_eq!(iter.len(), self.len());
1377
1378        let allocation = self.into_allocation();
1379        RawIntoIter {
1380            iter,
1381            allocation,
1382            marker: PhantomData,
1383        }
1384    }
1385
1386    /// Converts the table into a raw allocation. The contents of the table
1387    /// should be dropped using a `RawIter` before freeing the allocation.
1388    #[cfg_attr(feature = "inline-more", inline)]
1389    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1390        let alloc = if self.table.is_empty_singleton() {
1391            None
1392        } else {
1393            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1394            let (layout, ctrl_offset) =
1395                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1396                    Some(lco) => lco,
1397                    None => unsafe { hint::unreachable_unchecked() },
1398                };
1399            Some((
1400                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset).cast()) },
1401                layout,
1402                unsafe { ptr::read(&self.alloc) },
1403            ))
1404        };
1405        mem::forget(self);
1406        alloc
1407    }
1408}
1409
1410unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1411where
1412    T: Send,
1413    A: Send,
1414{
1415}
1416unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1417where
1418    T: Sync,
1419    A: Sync,
1420{
1421}
1422
1423impl RawTableInner {
1424    const NEW: Self = RawTableInner::new();
1425
1426    /// Creates a new empty hash table without allocating any memory.
1427    ///
1428    /// In effect this returns a table with exactly 1 bucket. However we can
1429    /// leave the data pointer dangling since that bucket is never accessed
1430    /// due to our load factor forcing us to always have at least 1 free bucket.
1431    #[inline]
1432    const fn new() -> Self {
1433        Self {
1434            // Be careful to cast the entire slice to a raw pointer.
1435            ctrl: unsafe {
1436                NonNull::new_unchecked(Group::static_empty().as_ptr().cast_mut().cast())
1437            },
1438            bucket_mask: 0,
1439            items: 0,
1440            growth_left: 0,
1441        }
1442    }
1443}
1444
1445impl RawTableInner {
1446    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1447    /// The control bytes and buckets are left uninitialized.
1448    ///
1449    /// # Safety
1450    ///
1451    /// The caller of this function must ensure that `buckets` is a power of two
1452    /// and must also initialize all `self.bucket_mask + 1 + Group::WIDTH` control
1453    /// bytes with the [`Tag::EMPTY`] byte.
1454    ///
1455    /// See also [`Allocator`] API for other safety concerns.
1456    ///
1457    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1458    #[cfg_attr(feature = "inline-more", inline)]
1459    unsafe fn new_uninitialized<A>(
1460        alloc: &A,
1461        table_layout: TableLayout,
1462        buckets: usize,
1463        fallibility: Fallibility,
1464    ) -> Result<Self, TryReserveError>
1465    where
1466        A: Allocator,
1467    {
1468        debug_assert!(buckets.is_power_of_two());
1469
1470        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1471        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1472            Some(lco) => lco,
1473            None => return Err(fallibility.capacity_overflow()),
1474        };
1475
1476        let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1477            Ok(block) => block.cast(),
1478            Err(_) => return Err(fallibility.alloc_err(layout)),
1479        };
1480
1481        // SAFETY: allocation failure was handled by the check above, so `ptr` is a valid,
1482        //         non-null allocation and `ptr + ctrl_offset` stays within it (hence non-null).
1482        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1483        Ok(Self {
1484            ctrl,
1485            bucket_mask: buckets - 1,
1486            items: 0,
1487            growth_left: bucket_mask_to_capacity(buckets - 1),
1488        })
1489    }
1490
1491    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1492    /// capacity for inserting the given number of elements without reallocating.
1493    ///
1494    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1495    #[inline]
1496    fn fallible_with_capacity<A>(
1497        alloc: &A,
1498        table_layout: TableLayout,
1499        capacity: usize,
1500        fallibility: Fallibility,
1501    ) -> Result<Self, TryReserveError>
1502    where
1503        A: Allocator,
1504    {
1505        if capacity == 0 {
1506            Ok(Self::NEW)
1507        } else {
1508            // SAFETY: We checked that we could successfully allocate the new table, and then
1509            // initialized all control bytes with the constant `Tag::EMPTY` byte.
1510            unsafe {
1511                let buckets = capacity_to_buckets(capacity, table_layout)
1512                    .ok_or_else(|| fallibility.capacity_overflow())?;
1513
1514                let mut result =
1515                    Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1516                // SAFETY: We checked that the table is allocated and therefore the table already has
1517                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1518                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1519                result.ctrl_slice().fill_empty();
1520
1521                Ok(result)
1522            }
1523        }
1524    }
1525
1526    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1527    /// the given number of elements without reallocating.
1528    ///
1529    /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the program
1530    /// in case of an allocation error. Use [`fallible_with_capacity`] instead if you want to
1531    /// handle memory allocation failures.
1532    ///
1533    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1534    ///
1535    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1536    /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
1537    fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
1538    where
1539        A: Allocator,
1540    {
1541        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1542        match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
1543            Ok(table_inner) => table_inner,
1544            // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
1545            Err(_) => unsafe { hint::unreachable_unchecked() },
1546        }
1547    }
1548
1549    /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
1550    ///
1551    /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
1552    /// bytes outside the range of the table are filled with [`Tag::EMPTY`] entries. These will unfortunately
1553    /// trigger a match in the [`RawTableInner::find_insert_slot_in_group`] function: the `Some(bit)`
1554    /// returned by `group.match_empty_or_deleted().lowest_set_bit()`, after masking
1555    /// (`(probe_seq.pos + bit) & self.bucket_mask`), may point to a bucket that is already full.
1556    /// We detect this situation here and perform a second scan starting at the beginning of the table.
1557    /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
1558    /// trailing control bytes (containing [`Tag::EMPTY`] bytes).
1559    ///
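    /// As a concrete illustration (hypothetical numbers, with `Group::WIDTH == 16` and a
    /// 4-bucket table whose real control bytes are `[FULL, EMPTY, FULL, FULL]`): for a probe
    /// starting at position 2, the lowest empty/deleted bit in the loaded group falls in the
    /// trailing `EMPTY` bytes (bit 2), so `find_insert_slot_in_group` returns
    /// `(2 + 2) & 3 == 0`, which is a `FULL` bucket. The rescan from the start of the table
    /// performed here then yields the real empty slot at index 1.
    ///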
1560    /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
1561    /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
1562    /// `Safety`).
1563    ///
1564    /// # Warning
1565    ///
1566    /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
1567    /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
1568    /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
1569    /// index will cause immediate [`undefined behavior`].
1570    ///
1571    /// # Safety
1572    ///
1573    /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
1574    /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work
1575    /// of this crate, the following rules are necessary and sufficient:
1576    ///
1577    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
1578    ///   function results in [`undefined behavior`].
1579    ///
1580    /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
1581    ///   (after the `find_insert_slot_in_group` function, but before insertion into the table).
1582    ///
1583    /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
1584    ///   (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
1585    ///
1586    /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
1587    /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
1588    /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
1589    ///
1590    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1591    /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
1592    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1593    #[inline]
1594    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1595        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1596        if unlikely(self.is_bucket_full(index)) {
1597            debug_assert!(self.bucket_mask < Group::WIDTH);
1598            // SAFETY:
1599            //
1600            // * Since the caller of this function ensures that the control bytes are properly
1601            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1602            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1603            //   and points to the properly initialized control bytes (see also
1604            //   `TableLayout::calculate_layout_for` and `ptr::read`);
1605            //
1606            // * Because the caller of this function ensures that the index was provided by the
1607            //   `self.find_insert_slot_in_group()` function, so for tables larger than the
1608            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1609            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
1610            //   cannot return a full bucket index. For tables smaller than the group width, calling
1611            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1612            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
1613            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
1614            //   the load factor) or hits the trailing control bytes (containing EMPTY).
1615            index = Group::load_aligned(self.ctrl(0))
1616                .match_empty_or_deleted()
1617                .lowest_set_bit()
1618                .unwrap_unchecked();
1619        }
1620        InsertSlot { index }
1621    }
1622
1623    /// Finds the position to insert something in a group.
1624    ///
1625    /// **This may have false positives and must be fixed up with `fix_insert_slot`
1626    /// before it's used.**
1627    ///
1628    /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
1629    /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
1630    #[inline]
1631    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1632        let bit = group.match_empty_or_deleted().lowest_set_bit();
1633
1634        if likely(bit.is_some()) {
1635            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1636            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1637            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1638        } else {
1639            None
1640        }
1641    }
1642
1643    /// Searches for an element in the table, or a potential slot where that element could
1644    /// be inserted (an empty or deleted [`Bucket`] index).
1645    ///
1646    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1647    /// eliminated by LLVM optimizations.
1648    ///
1649    /// This function does not make any changes to the `data` part of the table, or any
1650    /// changes to the `items` or `growth_left` field of the table.
1651    ///
1652    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
1653    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
1654    /// will never return (will go into an infinite loop) for tables larger than the group
1655    /// width, or return an index outside of the table indices range if the table is less
1656    /// than the group width.
1657    ///
1658    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1659    /// function with only `FULL` buckets' indices and return the `index` of the found
1660    /// element (as `Ok(index)`). If the element is not found and there is at least 1
1661    /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
1662    /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case,
1663    /// if this function returns [`InsertSlot`], it will contain an index in the range
1664    /// `0..=self.buckets()`.
1665    ///
1666    /// # Safety
1667    ///
1668    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1669    /// this function results in [`undefined behavior`].
1670    ///
1671    /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is
1672    /// less than the group width and if there was not at least one empty or deleted bucket in
1673    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1674    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1675    /// control bytes outside the table range.
1676    ///
1677    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1678    #[inline]
1679    unsafe fn find_or_find_insert_slot_inner(
1680        &self,
1681        hash: u64,
1682        eq: &mut dyn FnMut(usize) -> bool,
1683    ) -> Result<usize, InsertSlot> {
1684        let mut insert_slot = None;
1685
1686        let tag_hash = Tag::full(hash);
1687        let mut probe_seq = self.probe_seq(hash);
1688
1689        loop {
1690            // SAFETY:
1691            // * Caller of this function ensures that the control bytes are properly initialized.
1692            //
1693            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1694            //   of the table due to masking with `self.bucket_mask` and also because the number
1695            //   of buckets is a power of two (see `self.probe_seq` function).
1696            //
1697            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1698            //   call `Group::load` due to the extended control bytes range, which is
1699            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1700            //   byte will never be read for the allocated table);
1701            //
1702            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1703            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1704            //   bytes, which is safe (see RawTableInner::new).
1705            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1706
1707            for bit in group.match_tag(tag_hash) {
1708                let index = (probe_seq.pos + bit) & self.bucket_mask;
1709
1710                if likely(eq(index)) {
1711                    return Ok(index);
1712                }
1713            }
1714
1715            // We didn't find the element we were looking for in the group, try to get an
1716            // insertion slot from the group if we don't have one yet.
1717            if likely(insert_slot.is_none()) {
1718                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1719            }
1720
1721            if let Some(insert_slot) = insert_slot {
1722                // Only stop the search if the group contains at least one empty element.
1723                // Otherwise, the element that we are looking for might be in a following group.
1724                if likely(group.match_empty().any_bit_set()) {
1725                    // We must have found an insert slot by now, since the current group contains at
1726                    // least one empty slot. For tables smaller than the group width, there will still
1727                    // be an empty element in the current (and only) group due to the load factor.
1728                    unsafe {
1729                        // SAFETY:
1730                        // * Caller of this function ensures that the control bytes are properly initialized.
1731                        //
1732                        // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1733                        return Err(self.fix_insert_slot(insert_slot));
1734                    }
1735                }
1736            }
1737
1738            probe_seq.move_next(self.bucket_mask);
1739        }
1740    }
1741
1742    /// Searches for an empty or deleted bucket which is suitable for inserting a new
1743    /// element and sets the hash for that slot. Returns an index of that slot and the
1744    /// old control byte stored in the found index.
1745    ///
1746    /// This function does not check if the given element exists in the table. Also,
1747    /// this function does not check if there is enough space in the table to insert
1748    /// a new element. The caller of the function must make sure that the table has at
1749    /// least 1 empty or deleted `bucket`, otherwise this function will never return
1750    /// (will go into an infinite loop) for tables larger than the group width, or
1751    /// return an index outside of the table indices range if the table is less than
1752    /// the group width.
1753    ///
1754    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1755    /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
1756    /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
1757    ///
1758    /// This function does not make any changes to the `data` parts of the table,
1759    /// or any changes to the `items` or `growth_left` field of the table.
1760    ///
1761    /// # Safety
1762    ///
1763    /// The safety rules are directly derived from the safety rules for the
1764    /// [`RawTableInner::set_ctrl_hash`] and [`RawTableInner::find_insert_slot`] methods.
1765    /// Thus, in order to uphold the safety contracts for those methods, as well as for
1766    /// the correct logic of the work of this crate, you must observe the following rules
1767    /// when calling this function:
1768    ///
1769    /// * The [`RawTableInner`] has already been allocated and has properly initialized
1770    ///   control bytes otherwise calling this function results in [`undefined behavior`].
1771    ///
1772    /// * The caller of this function must ensure that the "data" parts of the table
1773    ///   will have an entry in the returned index (matching the given hash) right
1774    ///   after calling this function.
1775    ///
1776    /// Attempt to write data at the `index` returned by this function when the table is
1777    /// less than the group width and if there was not at least one empty or deleted bucket in
1778    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1779    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1780    /// control bytes outside the table range.
1781    ///
1782    /// The caller must independently increase the `items` field of the table, and also,
1783    /// if the old control byte was [`Tag::EMPTY`], then decrease the table's `growth_left`
1784    /// field, and do not change it if the old control byte was [`Tag::DELETED`].
1785    ///
1786    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
1787    /// or saving an `element` from / into the [`RawTable`] / [`RawTableInner`].
1788    ///
1789    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1790    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1791    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1792    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
1793    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1794    #[inline]
1795    unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, Tag) {
1796        // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
1797        let index: usize = self.find_insert_slot(hash).index;
1798        // SAFETY:
1799        // 1. The `find_insert_slot` function either returns an `index` less than or
1800        //    equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
1801        //    returns if it cannot find an empty or deleted slot.
1802        // 2. The caller of this function guarantees that the table has already been
1803        //    allocated
1804        let old_ctrl = *self.ctrl(index);
1805        self.set_ctrl_hash(index, hash);
1806        (index, old_ctrl)
1807    }
1808
1809    /// Searches for an empty or deleted bucket which is suitable for inserting
1810    /// a new element, returning the `index` for the new [`Bucket`].
1811    ///
1812    /// This function does not make any changes to the `data` part of the table, or any
1813    /// changes to the `items` or `growth_left` field of the table.
1814    ///
1815    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
1816    /// will never return (will go into an infinite loop) for tables larger than the group
1817    /// width, or return an index outside of the table indices range if the table is less
1818    /// than the group width.
1819    ///
1820    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1821    /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
1822    /// but in any case, if this function returns [`InsertSlot`], it will contain an index
1823    /// in the range `0..=self.buckets()`.
1824    ///
1825    /// # Safety
1826    ///
1827    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1828    /// this function results in [`undefined behavior`].
1829    ///
1830    /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is
1831    /// less than the group width and if there was not at least one empty or deleted bucket in
1832    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1833    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1834    /// control bytes outside the table range.
1835    ///
1836    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1837    #[inline]
1838    unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
1839        let mut probe_seq = self.probe_seq(hash);
1840        loop {
1841            // SAFETY:
1842            // * Caller of this function ensures that the control bytes are properly initialized.
1843            //
1844            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1845            //   of the table due to masking with `self.bucket_mask` and also because the number
1846            //   of buckets is a power of two (see `self.probe_seq` function).
1847            //
1848            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1849            //   call `Group::load` due to the extended control bytes range, which is
1850            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1851            //   byte will never be read for the allocated table);
1852            //
1853            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1854            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1855            //   bytes, which is safe (see RawTableInner::new).
1856            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1857
1858            let index = self.find_insert_slot_in_group(&group, &probe_seq);
1859            if likely(index.is_some()) {
1860                // SAFETY:
1861                // * Caller of this function ensures that the control bytes are properly initialized.
1862                //
1863                // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1864                unsafe {
1865                    return self.fix_insert_slot(index.unwrap_unchecked());
1866                }
1867            }
1868            probe_seq.move_next(self.bucket_mask);
1869        }
1870    }
1871
1872    /// Searches for an element in a table, returning the `index` of the found element.
1873    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
1874    /// eliminated by LLVM optimizations.
1875    ///
1876    /// This function does not make any changes to the `data` part of the table, or any
1877    /// changes to the `items` or `growth_left` field of the table.
1878    ///
1879    /// The table must have at least 1 empty `bucket`, otherwise, if the
1880    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
1881    /// this function will also never return (will go into an infinite loop).
1882    ///
1883    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1884    /// function with only `FULL` buckets' indices and return the `index` of the found
1885    /// element as `Some(index)`, so the index will always be in the range
1886    /// `0..self.buckets()`.
1887    ///
1888    /// # Safety
1889    ///
1890    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1891    /// this function results in [`undefined behavior`].
1892    ///
1893    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1894    #[inline(always)]
1895    unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
1896        let tag_hash = Tag::full(hash);
1897        let mut probe_seq = self.probe_seq(hash);
1898
1899        loop {
1900            // SAFETY:
1901            // * Caller of this function ensures that the control bytes are properly initialized.
1902            //
1903            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1904            //   of the table due to masking with `self.bucket_mask`.
1905            //
1906            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1907            //   call `Group::load` due to the extended control bytes range, which is
1908            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1909            //   byte will never be read for the allocated table);
1910            //
1911            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1912            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1913            //   bytes, which is safe (see RawTableInner::new).
1914            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1915
1916            for bit in group.match_tag(tag_hash) {
1917                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1918                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1919                let index = (probe_seq.pos + bit) & self.bucket_mask;
1920
1921                if likely(eq(index)) {
1922                    return Some(index);
1923                }
1924            }
1925
1926            if likely(group.match_empty().any_bit_set()) {
1927                return None;
1928            }
1929
1930            probe_seq.move_next(self.bucket_mask);
1931        }
1932    }
1933
1934    /// Prepares for rehashing data in place (that is, without allocating new memory).
1935    /// Converts all `FULL` control bytes to `Tag::DELETED` and all `Tag::DELETED` control
1936    /// bytes to `Tag::EMPTY`, i.e. performs the following conversion:
1937    ///
1938    /// - `Tag::EMPTY` control bytes   -> `Tag::EMPTY`;
1939    /// - `Tag::DELETED` control bytes -> `Tag::EMPTY`;
1940    /// - `FULL` control bytes         -> `Tag::DELETED`.
1941    ///
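    /// For example (a hypothetical 4-bucket table), the control bytes
    /// `[FULL, DELETED, EMPTY, FULL]` become `[DELETED, EMPTY, EMPTY, DELETED]`.
    ///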
1942    /// This function does not make any changes to the `data` parts of the table,
1943    /// or any changes to the `items` or `growth_left` field of the table.
1944    ///
1945    /// # Safety
1946    ///
1947    /// You must observe the following safety rules when calling this function:
1948    ///
1949    /// * The [`RawTableInner`] has already been allocated;
1950    ///
1951    /// * The caller of this function must convert the `Tag::DELETED` bytes back to `FULL`
1952    ///   bytes when re-inserting them into their ideal position (which was impossible
1953    ///   to do during the first insert due to tombstones). If the caller does not do
1954    ///   this, then calling this function may result in a memory leak.
1955    ///
1956    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
1957    ///   calling this function results in [`undefined behavior`].
1958    ///
1959    /// Calling this function on a table that has not been allocated results in
1960    /// [`undefined behavior`].
1961    ///
1962    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
1963    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
1964    ///
1965    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1966    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1967    #[allow(clippy::mut_mut)]
1968    #[inline]
1969    unsafe fn prepare_rehash_in_place(&mut self) {
1970        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
1971        // This effectively frees up all buckets containing a DELETED entry.
1972        //
1973        // SAFETY:
1974        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
1975        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
1976        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
1977        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
1978        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
1979        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
1980        for i in (0..self.buckets()).step_by(Group::WIDTH) {
1981            let group = Group::load_aligned(self.ctrl(i));
1982            let group = group.convert_special_to_empty_and_full_to_deleted();
1983            group.store_aligned(self.ctrl(i));
1984        }
1985
1986        // Fix up the trailing control bytes. See the comments in set_ctrl
1987        // for the handling of tables smaller than the group width.
1988        //
1989        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
1990        // has already been allocated
1991        if unlikely(self.buckets() < Group::WIDTH) {
1992            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
1993            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
1994            // `Group::WIDTH` is safe
1995            self.ctrl(0)
1996                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
1997        } else {
1998            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
1999            // control bytes, so copying `Group::WIDTH` bytes with offset equal
2000            // to `self.buckets() == self.bucket_mask + 1` is safe
2001            self.ctrl(0)
2002                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2003        }
2004    }
2005
2006    /// Returns an iterator over every element in the table.
2007    ///
2008    /// # Safety
2009    ///
2010    /// If any of the following conditions are violated, the result
2011    /// is [`undefined behavior`]:
2012    ///
2013    /// * The caller has to ensure that the `RawTableInner` outlives the
2014    ///   `RawIter`. Because we cannot make the `next` method unsafe on
2015    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
2016    ///
2017    /// * The [`RawTableInner`] must have properly initialized control bytes.
2018    ///
2019    /// The type `T` must be the actual type of the elements stored in the table,
2020    /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
2021    ///
2022    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2023    #[inline]
2024    unsafe fn iter<T>(&self) -> RawIter<T> {
2025        // SAFETY:
2026        // 1. Since the caller of this function ensures that the control bytes
2027        //    are properly initialized and `self.data_end()` points to the start
2028        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030        //    control bytes.
2031        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032        //    equal to zero).
2033        // 3. We pass the exact value of buckets of the table to the function.
2034        //
2035        //                         `ctrl` points here (to the start
2036        //                         of the first control byte `CT0`)
2037        //                          ∨
2038        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039        //                           \________  ________/
2040        //                                    \/
2041        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042        //
2043        // where: T0...T_n  - our stored data;
2044        //        CT0...CT_n - control bytes or metadata for `data`.
2045        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046        //                        with loading `Group` bytes from the heap works properly, even if the result
2047        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048        //                        `RawTableInner::set_ctrl` function.
2049        //
2050        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052        let data = Bucket::from_base_index(self.data_end(), 0);
2053        RawIter {
2054            // SAFETY: See explanation above
2055            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056            items: self.items,
2057        }
2058    }
2059
2060    /// Executes the destructors (if any) of the values stored in the table.
2061    ///
2062    /// # Note
2063    ///
2064    /// This function does not erase the control bytes of the table and does
2065    /// not make any changes to the `items` or `growth_left` fields of the
2066    /// table. If necessary, the caller of this function must manually set
2067    /// up these table fields, for example using the [`clear_no_drop`] function.
2068    ///
2069    /// Be careful when calling this function, because the drop function of
2070    /// the elements can panic, and this can leave the table in an inconsistent
2071    /// state.
2072    ///
2073    /// # Safety
2074    ///
2075    /// The type `T` must be the actual type of the elements stored in the table,
2076    /// otherwise calling this function may result in [`undefined behavior`].
2077    ///
2078    /// If `T` is a type that should be dropped and **the table is not empty**,
2079    /// calling this function more than once results in [`undefined behavior`].
2080    ///
2081    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2082    /// calling this function may result in [`undefined behavior`].
2083    ///
2084    /// It is safe to call this function on a table that has not been allocated,
2085    /// on a table with uninitialized control bytes, and on a table with no actual
2086    /// data but with `Full` control bytes if `self.items == 0`.
2087    ///
2088    /// See also the [`Bucket::drop`] / [`Bucket::as_ptr`] methods for more information
2089    /// about properly removing or saving an `element` from / into the [`RawTable`] /
2090    /// [`RawTableInner`].
2091    ///
2092    /// [`Bucket::drop`]: Bucket::drop
2093    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2094    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2095    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2096    unsafe fn drop_elements<T>(&mut self) {
2097        // Check that `self.items != 0`. Protects against the possibility
2098        // of creating an iterator on a table with uninitialized control bytes.
2099        if T::NEEDS_DROP && self.items != 0 {
2100            // SAFETY: We know for sure that RawTableInner will outlive the
2101            // returned `RawIter` iterator, and the caller of this function
2102            // must uphold the safety contract for `drop_elements` method.
2103            for item in self.iter::<T>() {
2104                // SAFETY: The caller must uphold the safety contract for
2105                // `drop_elements` method.
2106                item.drop();
2107            }
2108        }
2109    }
2110
2111    /// Executes the destructors (if any) of the values stored in the table and then
2112    /// deallocates the table.
2113    ///
2114    /// # Note
2115    ///
2116    /// Calling this function automatically makes invalid (dangling) all instances of
2117    /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
2118    ///
2119    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2120    /// fields of the table. If necessary, the caller of this function must manually set
2121    /// up these table fields.
2122    ///
2123    /// # Safety
2124    ///
2125    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2126    ///
2127    /// * Calling this function more than once;
2128    ///
2129    /// * The type `T` must be the actual type of the elements stored in the table.
2130    ///
2131    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2132    ///   to allocate this table.
2133    ///
2134    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2135    ///   was used to allocate this table.
2136    ///
2137    /// The caller of this function should be aware that the elements' drop function
2138    /// can panic, in which case:
2139    ///
2140    ///    * The table may be left in an inconsistent state;
2141    ///
2142    ///    * Memory is never deallocated, so a memory leak may occur.
2143    ///
2144    /// Attempt to use the `ctrl` field of the table (dereference) after calling this
2145    /// function results in [`undefined behavior`].
2146    ///
2147    /// It is safe to call this function on a table that has not been allocated,
2148    /// on a table with uninitialized control bytes, and on a table with no actual
2149    /// data but with `Full` control bytes if `self.items == 0`.
2150    ///
2151    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2152    /// for more information.
2153    ///
2154    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2155    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2156    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2157    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158        if !self.is_empty_singleton() {
2159            unsafe {
2160                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161                self.drop_elements::<T>();
2162                // SAFETY:
2163                // 1. We have checked that our table is allocated.
2164                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165                self.free_buckets(alloc, table_layout);
2166            }
2167        }
2168    }
2169
2170    /// Returns a pointer to an element in the table (convenience for
2171    /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
2172    ///
2173    /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
2174    /// otherwise using it may result in [`undefined behavior`].
2175    ///
2176    /// # Safety
2177    ///
2178    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
2179    /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
2180    /// this function, the following safety rules must be observed:
2181    ///
2182    /// * The table must already be allocated;
2183    ///
2184    /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2185    ///   function, i.e. `(index + 1) <= self.buckets()`.
2186    ///
2187    /// * The type `T` must be the actual type of the elements stored in the table, otherwise
2188    ///   using the returned [`Bucket`] may result in [`undefined behavior`].
2189    ///
2190    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
2191    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
2192    ///
2193    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
2194    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
2195    /// `(index + 1) <= self.buckets()`.
2196    ///
2197    /// ```none
2198    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2199    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2200    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2201    ///
2202    ///           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
2203    ///           part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
2204    ///                  |
2205    ///                  |               `base = table.data_end::<T>()` points here
2206    ///                  |               (to the start of CT0 or to the end of T0)
2207    ///                  v                 v
2208    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2209    ///                     ^                                              \__________  __________/
2210    ///        `table.bucket(3)` returns a pointer that points                        \/
2211    ///         here in the `data` part of the `RawTableInner`             additional control bytes
2212    ///         (to the end of T3)                                          `m = Group::WIDTH - 1`
2213    ///
2214    /// where: T0...T_n  - our stored data;
2215    ///        CT0...CT_n - control bytes or metadata for `data`;
2216    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2217    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2218    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2219    ///
2220    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2221    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2222    /// ```
2223    ///
2224    /// [`Bucket::from_base_index`]: Bucket::from_base_index
2225    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2226    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2227    #[inline]
2228    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2229        debug_assert_ne!(self.bucket_mask, 0);
2230        debug_assert!(index < self.buckets());
2231        Bucket::from_base_index(self.data_end(), index)
2232    }
2233
2234    /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
2235    /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
2236    ///
2237    /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
2238    /// otherwise using it may result in [`undefined behavior`].
2239    ///
2240    /// # Safety
2241    ///
2242    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2243    ///
2244    /// * The table must already be allocated;
2245    ///
2246    /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2247    ///   function, i.e. `(index + 1) <= self.buckets()`;
2248    ///
2249    /// * The `size_of` must be equal to the size of the elements stored in the table;
2250    ///
2251    /// ```none
2252    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2253    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2254    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2255    ///
2256    ///           `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
2257    ///           `data` part of the `RawTableInner`, i.e. to the start of T3
2258    ///                  |
2259    ///                  |               `base = table.data_end::<u8>()` points here
2260    ///                  |               (to the start of CT0 or to the end of T0)
2261    ///                  v                 v
2262    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2263    ///                                                                    \__________  __________/
2264    ///                                                                               \/
2265    ///                                                                    additional control bytes
2266    ///                                                                     `m = Group::WIDTH - 1`
2267    ///
2268    /// where: T0...T_n  - our stored data;
2269    ///        CT0...CT_n - control bytes or metadata for `data`;
2270    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2271    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2272    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2273    ///
2274    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2275    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2276    /// ```
2277    ///
2278    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2279    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2280    #[inline]
2281    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2282        debug_assert_ne!(self.bucket_mask, 0);
2283        debug_assert!(index < self.buckets());
2284        let base: *mut u8 = self.data_end().as_ptr();
2285        base.sub((index + 1) * size_of)
2286    }
2287
2288    /// Returns pointer to one past last `data` element in the table as viewed from
2289    /// the start point of the allocation (convenience for `self.ctrl.cast()`).
2290    ///
2291    /// This function actually returns a pointer to the end of the `data element` at
2292    /// index "0" (zero).
2293    ///
2294    /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
2295    /// otherwise using it may result in [`undefined behavior`].
2296    ///
2297    /// # Note
2298    ///
2299    /// The type `T` must be the actual type of the elements stored in the table, otherwise
2300    /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
2301    ///
2302    /// ```none
2303    ///                        `table.data_end::<T>()` returns pointer that points here
2304    ///                        (to the end of `T0`)
2305    ///                          ∨
2306    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2307    ///                           \________  ________/
2308    ///                                    \/
2309    ///       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2310    ///
2311    /// where: T0...T_n  - our stored data;
2312    ///        CT0...CT_n - control bytes or metadata for `data`.
2313    ///        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2314    ///                        with loading `Group` bytes from the heap works properly, even if the result
2315    ///                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2316    ///                        `RawTableInner::set_ctrl` function.
2317    ///
2318    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2319    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2320    /// ```
2321    ///
2322    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2323    #[inline]
2324    fn data_end<T>(&self) -> NonNull<T> {
2325        self.ctrl.cast()
2326    }
2327
2328    /// Returns an iterator-like object for a probe sequence on the table.
2329    ///
2330    /// This iterator never terminates, but is guaranteed to visit each bucket
2331    /// group exactly once. The loop using `probe_seq` must terminate upon
2332    /// reaching a group containing an empty bucket.
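    ///
    /// A sketch of how the probe sequence is typically driven (not the exact search code;
    /// see `find_inner` and `find_insert_slot` for the real loops):
    ///
    /// ```ignore
    /// let mut probe_seq = self.probe_seq(hash);
    /// loop {
    ///     // SAFETY: `pos` is always masked by `bucket_mask`, so it is in bounds.
    ///     let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
    ///     // ... inspect `group` for matching or empty control bytes ...
    ///     if group.match_empty().any_bit_set() {
    ///         break; // reaching a group with an empty bucket ends the probe
    ///     }
    ///     probe_seq.move_next(self.bucket_mask);
    /// }
    /// ```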
2333    #[inline]
2334    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2335        ProbeSeq {
2336            // This is the same as `hash as usize % self.buckets()` because the number
2337            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2338            pos: h1(hash) & self.bucket_mask,
2339            stride: 0,
2340        }
2341    }
2342
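    /// Bookkeeping for an insertion at `index`: consumes one unit of `growth_left` if the
    /// replaced control byte was empty, writes the control byte derived from `hash`, and
    /// increments `items`.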
2343    #[inline]
2344    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, hash: u64) {
2345        self.growth_left -= usize::from(old_ctrl.special_is_empty());
2346        self.set_ctrl_hash(index, hash);
2347        self.items += 1;
2348    }
2349
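    /// Returns `true` if buckets `i` and `new_i` fall into the same (possibly unaligned)
    /// probe group for `hash`, i.e. the same `Group::WIDTH`-sized window of the probe
    /// sequence starting at `probe_seq(hash).pos`.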
2350    #[inline]
2351    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2352        let probe_seq_pos = self.probe_seq(hash).pos;
2353        let probe_index =
2354            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
2355        probe_index(i) == probe_index(new_i)
2356    }
2357
2358    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2359    /// the end of the array.
2360    ///
2361    /// This function does not make any changes to the `data` parts of the table,
2362    /// or any changes to the `items` or `growth_left` field of the table.
2363    ///
2364    /// # Safety
2365    ///
2366    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2367    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2368    /// following rules when calling this function:
2369    ///
2370    /// * The [`RawTableInner`] has already been allocated;
2371    ///
2372    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2373    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2374    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2375    ///
2376    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2377    ///
2378    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2379    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2380    ///
2381    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2382    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2383    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2384    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2385    #[inline]
2386    unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) {
2387        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`]
2388        self.set_ctrl(index, Tag::full(hash));
2389    }
2390
2391    /// Replaces the hash in the control byte at the given index with the provided one,
2392    /// and possibly also replicates the new control byte at the end of the array of control
2393    /// bytes, returning the old control byte.
2394    ///
2395    /// This function does not make any changes to the `data` parts of the table,
2396    /// or any changes to the `items` or `growth_left` field of the table.
2397    ///
2398    /// # Safety
2399    ///
2400    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_hash`]
2401    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2402    /// methods, you must observe the following rules when calling this function:
2403    ///
2404    /// * The [`RawTableInner`] has already been allocated;
2405    ///
2406    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2407    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2408    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2409    ///
2410    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2411    ///
2412    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2413    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2414    ///
2415    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
2416    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2417    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2418    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2419    #[inline]
2420    unsafe fn replace_ctrl_hash(&mut self, index: usize, hash: u64) -> Tag {
2421        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`]
2422        let prev_ctrl = *self.ctrl(index);
2423        self.set_ctrl_hash(index, hash);
2424        prev_ctrl
2425    }
2426
2427    /// Sets a control byte, and possibly also the replicated control byte at
2428    /// the end of the array.
2429    ///
2430    /// This function does not make any changes to the `data` parts of the table,
2431    /// or any changes to the `items` or `growth_left` field of the table.
2432    ///
2433    /// # Safety
2434    ///
2435    /// You must observe the following safety rules when calling this function:
2436    ///
2437    /// * The [`RawTableInner`] has already been allocated;
2438    ///
2439    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2440    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2441    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2442    ///
2443    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2444    ///
2445    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2446    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2447    ///
2448    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2449    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2450    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2451    #[inline]
2452    unsafe fn set_ctrl(&mut self, index: usize, ctrl: Tag) {
2453        // Replicate the first Group::WIDTH control bytes at the end of
2454        // the array without using a branch. If the table is smaller than
2455        // the group width (self.buckets() < Group::WIDTH),
2456        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2457        //
2458        // - If index >= Group::WIDTH then index == index2.
2459        // - Otherwise index2 == self.bucket_mask + 1 + index.
2460        //
2461        // The very last replicated control byte is never actually read because
2462        // we mask the initial index for unaligned loads, but we write it
2463        // anyway because it makes the set_ctrl implementation simpler.
2464        //
2465        // If there are fewer buckets than Group::WIDTH then this code will
2466        // replicate the control bytes at the end of the trailing group. For example
2467        // with 2 buckets and a group size of 4, the control bytes will look
2468        // like this:
2469        //
2470        //     Real    |             Replicated
2471        // ---------------------------------------------
2472        // | [A] | [B] | [Tag::EMPTY] | [Tag::EMPTY] | [A] | [B] |
2473        // ---------------------------------------------
2474
2475        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2476        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
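        //
        // For example, with `Group::WIDTH == 4` and `self.buckets() == 8` (`bucket_mask == 7`):
        //   index == 5 (>= WIDTH): index2 == ((5 - 4) & 7) + 4 == 1 + 4 == 5 == index
        //   index == 2 (<  WIDTH): index2 == ((2 - 4) & 7) + 4 == 6 + 4 == 10 == bucket_mask + 1 + index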
2477        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2478
2479        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2480        *self.ctrl(index) = ctrl;
2481        *self.ctrl(index2) = ctrl;
2482    }
2483
2484    /// Returns a pointer to a control byte.
2485    ///
2486    /// # Safety
2487    ///
2488    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2489    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2490    /// Note that calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2491    /// returns a pointer to the end of the allocated table, which is useless on its own.
2492    ///
2493    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2494    /// table that has not been allocated results in [`Undefined Behavior`].
2495    ///
2496    /// So to satisfy both requirements you should always follow the rule that
2497    /// `index < self.bucket_mask + 1 + Group::WIDTH`.
2498    ///
2499    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
2500    /// for read-only purposes.
2501    ///
2502    /// See also the [`Bucket::as_ptr()`] method for more information about properly removing
2503    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2504    ///
2505    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2506    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2507    #[inline]
2508    unsafe fn ctrl(&self, index: usize) -> *mut Tag {
2509        debug_assert!(index < self.num_ctrl_bytes());
2510        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2511        self.ctrl.as_ptr().add(index).cast()
2512    }
2513
2514    /// Gets the slice of all control bytes.
2515    fn ctrl_slice(&mut self) -> &mut [Tag] {
2516        // SAFETY: We've initialized all control bytes, and have the correct number.
2517        unsafe { slice::from_raw_parts_mut(self.ctrl.as_ptr().cast(), self.num_ctrl_bytes()) }
2518    }
2519
2520    #[inline]
2521    fn buckets(&self) -> usize {
2522        self.bucket_mask + 1
2523    }
2524
2525    /// Checks whether the bucket at `index` is full.
2526    ///
2527    /// # Safety
2528    ///
2529    /// The caller must ensure `index` is less than the number of buckets.
2530    #[inline]
2531    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2532        debug_assert!(index < self.buckets());
2533        (*self.ctrl(index)).is_full()
2534    }
2535
2536    #[inline]
2537    fn num_ctrl_bytes(&self) -> usize {
2538        self.bucket_mask + 1 + Group::WIDTH
2539    }
2540
2541    #[inline]
2542    fn is_empty_singleton(&self) -> bool {
2543        self.bucket_mask == 0
2544    }
2545
2546    /// Attempts to allocate a new hash table with at least enough capacity
2547    /// for inserting the given number of elements without reallocating,
2548    /// and return it inside `ScopeGuard` to protect against panic in the hash
2549    /// function.
2550    ///
2551    /// # Note
2552    ///
2553    /// It is recommended (but not required):
2554    ///
2555    /// * That the new table's `capacity` be greater than or equal to `self.items`;
2556    ///
2557    /// * That `alloc` be the same [`Allocator`] as the `Allocator` used
2558    ///   to allocate this table;
2559    ///
2560    /// * That `table_layout` be the same [`TableLayout`] as the `TableLayout` used
2561    ///   to allocate this table.
2562    ///
2563    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2564    /// this table, then using `mem::swap` with the `self` and the new table returned
2565    /// by this function results in [`undefined behavior`].
2566    ///
2567    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
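    ///
    /// A usage sketch (mirroring `resize_inner`; names as in this module):
    ///
    /// ```ignore
    /// let mut new_table = self.prepare_resize(alloc, table_layout, capacity, fallibility)?;
    /// // ... move elements into `new_table`; if the hasher panics here, the scope guard
    /// // frees the new table's buckets without dropping any copied elements ...
    /// mem::swap(self, &mut new_table);
    /// // After the swap the guard holds the *old* table and frees its buckets on drop.
    /// ```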
2568    #[allow(clippy::mut_mut)]
2569    #[inline]
2570    fn prepare_resize<'a, A>(
2571        &self,
2572        alloc: &'a A,
2573        table_layout: TableLayout,
2574        capacity: usize,
2575        fallibility: Fallibility,
2576    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2577    where
2578        A: Allocator,
2579    {
2580        debug_assert!(self.items <= capacity);
2581
2582        // Allocate and initialize the new table.
2583        let new_table =
2584            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2585
2586        // The hash function may panic, in which case we simply free the new
2587        // table without dropping any elements that may have been copied into
2588        // it.
2589        //
2590        // This guard is also used to free the old table on success, see
2591        // the comment at the bottom of this function.
2592        Ok(guard(new_table, move |self_| {
2593            if !self_.is_empty_singleton() {
2594                // SAFETY:
2595                // 1. We have checked that our table is allocated.
2596                // 2. We know for sure that the `alloc` and `table_layout` matches the
2597                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2598                unsafe { self_.free_buckets(alloc, table_layout) };
2599            }
2600        }))
2601    }
2602
2603    /// Reserves or rehashes to make room for `additional` more elements.
2604    ///
2605    /// This uses dynamic dispatch to reduce the amount of
2606    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2607    ///
2608    /// # Safety
2609    ///
2610    /// If any of the following conditions are violated, the result is
2611    /// [`undefined behavior`]:
2612    ///
2613    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2614    ///   to allocate this table.
2615    ///
2616    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2617    ///   used to allocate this table.
2618    ///
2619    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2620    ///   the elements stored in the table.
2621    ///
2622    /// * The [`RawTableInner`] must have properly initialized control bytes.
2623    ///
2624    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
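    ///
    /// For illustration (assuming the usual 7/8 maximum load factor, so that
    /// `bucket_mask_to_capacity(31) == 28` for 32 buckets): if
    /// `self.items + additional <= 14` the table is rehashed in place to reclaim
    /// `DELETED` slots, otherwise it is resized to hold at least
    /// `max(self.items + additional, 29)` elements.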
2625    #[allow(clippy::inline_always)]
2626    #[inline(always)]
2627    unsafe fn reserve_rehash_inner<A>(
2628        &mut self,
2629        alloc: &A,
2630        additional: usize,
2631        hasher: &dyn Fn(&mut Self, usize) -> u64,
2632        fallibility: Fallibility,
2633        layout: TableLayout,
2634        drop: Option<unsafe fn(*mut u8)>,
2635    ) -> Result<(), TryReserveError>
2636    where
2637        A: Allocator,
2638    {
2639        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2640        let new_items = match self.items.checked_add(additional) {
2641            Some(new_items) => new_items,
2642            None => return Err(fallibility.capacity_overflow()),
2643        };
2644        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2645        if new_items <= full_capacity / 2 {
2646            // Rehash in-place without re-allocating if we have plenty of spare
2647            // capacity that is locked up due to DELETED entries.
2648
2649            // SAFETY:
2650            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2651            //    (since new_items <= full_capacity / 2);
2652            // 2. The caller ensures that `drop` function is the actual drop function of
2653            //    the elements stored in the table.
2654            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2655            //    used to allocate this table.
2656            // 4. The caller ensures that the control bytes of the `RawTableInner`
2657            //    are already initialized.
2658            self.rehash_in_place(hasher, layout.size, drop);
2659            Ok(())
2660        } else {
2661            // Otherwise, conservatively resize to at least the next size up
2662            // to avoid churning deletes into frequent rehashes.
2663            //
2664            // SAFETY:
2665            // 1. We know for sure that `capacity >= self.items`.
2666            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2667            //    [`TableLayout`] that were used to allocate this table.
2668            // 3. The caller ensures that the control bytes of the `RawTableInner`
2669            //    are already initialized.
2670            self.resize_inner(
2671                alloc,
2672                usize::max(new_items, full_capacity + 1),
2673                hasher,
2674                fallibility,
2675                layout,
2676            )
2677        }
2678    }
2679
2680    /// Returns an iterator over the indices of full buckets in the table.
2681    ///
2682    /// # Safety
2683    ///
2684    /// Behavior is undefined if any of the following conditions are violated:
2685    ///
2686    /// * The caller has to ensure that the `RawTableInner` outlives the
2687    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2688    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2689    ///   `full_buckets_indices` method unsafe.
2690    ///
2691    /// * The [`RawTableInner`] must have properly initialized control bytes.
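    ///
    /// A usage sketch (as in `resize_inner`):
    ///
    /// ```ignore
    /// // SAFETY: the table outlives the iterator and its control bytes are initialized.
    /// for index in unsafe { table.full_buckets_indices() } {
    ///     let ptr = unsafe { table.bucket_ptr(index, mem::size_of::<T>()) };
    ///     // ... read or move the element behind `ptr` ...
    /// }
    /// ```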
2692    #[inline(always)]
2693    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2694        // SAFETY:
2695        // 1. Since the caller of this function ensures that the control bytes
2696        //    are properly initialized and `self.ctrl(0)` points to the start
2697        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2698        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2699        //    control bytes.
2700        // 2. The value of `items` is equal to the amount of data (values) added
2701        //    to the table.
2702        //
2703        //                         `ctrl` points here (to the start
2704        //                         of the first control byte `CT0`)
2705        //                          ∨
2706        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2707        //                           \________  ________/
2708        //                                    \/
2709        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2710        //
2711        // where: T0...T_n  - our stored data;
2712        //        CT0...CT_n - control bytes or metadata for `data`.
2713        let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::<u8>());
2714
2715        FullBucketsIndices {
2716            // Load the first group
2717            // SAFETY: See explanation above.
2718            current_group: Group::load_aligned(ctrl.as_ptr().cast())
2719                .match_full()
2720                .into_iter(),
2721            group_first_index: 0,
2722            ctrl,
2723            items: self.items,
2724        }
2725    }
2726
2727    /// Allocates a new table of a different size and moves the contents of the
2728    /// current table into it.
2729    ///
2730    /// This uses dynamic dispatch to reduce the amount of
2731    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2732    ///
2733    /// # Safety
2734    ///
2735    /// If any of the following conditions are violated, the result is
2736    /// [`undefined behavior`]:
2737    ///
2738    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2739    ///   to allocate this table;
2740    ///
2741    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2742    ///   used to allocate this table;
2743    ///
2744    /// * The [`RawTableInner`] must have properly initialized control bytes.
2745    ///
2746    /// The caller of this function must ensure that `capacity >= self.items`,
2747    /// otherwise:
2748    ///
2749    /// * If `self.items != 0`, calling this function with `capacity == 0`
2750    ///   results in [`undefined behavior`].
2751    ///
2752    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
2753    ///   `self.items > capacity_to_buckets(capacity)` calling this function
2754    ///   results in [`undefined behavior`].
2755    ///
2756    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
2757    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
2758    ///   will never return (it will go into an infinite loop).
2759    ///
2760    /// Note: It is recommended (but not required) that the new table's `capacity`
2761    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
2762    /// this function may never return. See [`RawTableInner::find_insert_slot`] for
2763    /// more information.
2764    ///
2765    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2766    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2767    #[allow(clippy::inline_always)]
2768    #[inline(always)]
2769    unsafe fn resize_inner<A>(
2770        &mut self,
2771        alloc: &A,
2772        capacity: usize,
2773        hasher: &dyn Fn(&mut Self, usize) -> u64,
2774        fallibility: Fallibility,
2775        layout: TableLayout,
2776    ) -> Result<(), TryReserveError>
2777    where
2778        A: Allocator,
2779    {
2780        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2781        // that were used to allocate this table.
2782        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
2783
2784        // SAFETY: We know for sure that RawTableInner will outlive the
2785        // returned `FullBucketsIndices` iterator, and the caller of this
2786        // function ensures that the control bytes are properly initialized.
2787        for full_byte_index in self.full_buckets_indices() {
2788            // This may panic.
2789            let hash = hasher(self, full_byte_index);
2790
2791            // SAFETY:
2792            // We can use a simpler version of insert() here since:
2793            // 1. There are no DELETED entries.
2794            // 2. We know there is enough space in the table.
2795            // 3. All elements are unique.
2796            // 4. The caller of this function guarantees that `capacity > 0`
2797            //    so `new_table` must already have some allocated memory.
2798            // 5. We set `growth_left` and `items` fields of the new table
2799            //    after the loop.
2800            // 6. We insert into the table, at the returned index, the data
2801            //    matching the given hash immediately after calling this function.
2802            let (new_index, _) = new_table.prepare_insert_slot(hash);
2803
2804            // SAFETY:
2805            //
2806            // * `src` is valid for reads of `layout.size` bytes, since the
2807            //   table is alive and the `full_byte_index` is guaranteed to be
2808            //   within bounds (see `FullBucketsIndices::next_impl`);
2809            //
2810            // * `dst` is valid for writes of `layout.size` bytes, since the
2811            //   caller ensures that `table_layout` matches the [`TableLayout`]
2812            //   that was used to allocate old table and we have the `new_index`
2813            //   returned by `prepare_insert_slot`.
2814            //
2815            // * Both `src` and `dst` are properly aligned.
2816            //
2817            // * Both `src` and `dst` point to different regions of memory.
2818            ptr::copy_nonoverlapping(
2819                self.bucket_ptr(full_byte_index, layout.size),
2820                new_table.bucket_ptr(new_index, layout.size),
2821                layout.size,
2822            );
2823        }
2824
2825        // The hash function didn't panic, so we can safely set the
2826        // `growth_left` and `items` fields of the new table.
2827        new_table.growth_left -= self.items;
2828        new_table.items = self.items;
2829
2830        // We successfully copied all elements without panicking. Now replace
2831        // self with the new table. The old table will have its memory freed but
2832        // the items will not be dropped (since they have been moved into the
2833        // new table).
2834        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2835        // that was used to allocate this table.
2836        mem::swap(self, &mut new_table);
2837
2838        Ok(())
2839    }
2840
2841    /// Rehashes the contents of the table in place (i.e. without changing the
2842    /// allocation).
2843    ///
2844    /// If `hasher` panics then some of the table's contents may be lost.
2845    ///
2846    /// This uses dynamic dispatch to reduce the amount of
2847    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2848    ///
2849    /// # Safety
2850    ///
2851    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2852    ///
2853    /// * The `size_of` must be equal to the size of the elements stored in the table;
2854    ///
2855    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2856    ///   the elements stored in the table.
2857    ///
2858    /// * The [`RawTableInner`] has already been allocated;
2859    ///
2860    /// * The [`RawTableInner`] must have properly initialized control bytes.
2861    ///
2862    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2863    #[allow(clippy::inline_always)]
2864    #[cfg_attr(feature = "inline-more", inline(always))]
2865    #[cfg_attr(not(feature = "inline-more"), inline)]
2866    unsafe fn rehash_in_place(
2867        &mut self,
2868        hasher: &dyn Fn(&mut Self, usize) -> u64,
2869        size_of: usize,
2870        drop: Option<unsafe fn(*mut u8)>,
2871    ) {
2872        // If the hash function panics then properly clean up any elements
2873        // that we haven't rehashed yet. We unfortunately can't preserve these
2874        // elements, since we lost their hashes and have no way of recovering them
2875        // without risking another panic.
2876        self.prepare_rehash_in_place();
2877
2878        let mut guard = guard(self, move |self_| {
2879            if let Some(drop) = drop {
2880                for i in 0..self_.buckets() {
2881                    if *self_.ctrl(i) == Tag::DELETED {
2882                        self_.set_ctrl(i, Tag::EMPTY);
2883                        drop(self_.bucket_ptr(i, size_of));
2884                        self_.items -= 1;
2885                    }
2886                }
2887            }
2888            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
2889        });
2890
2891        // At this point, DELETED elements are elements that we haven't
2892        // rehashed yet. Find them and re-insert them at their ideal
2893        // position.
2894        'outer: for i in 0..guard.buckets() {
2895            if *guard.ctrl(i) != Tag::DELETED {
2896                continue;
2897            }
2898
2899            let i_p = guard.bucket_ptr(i, size_of);
2900
2901            'inner: loop {
2902                // Hash the current item
2903                let hash = hasher(*guard, i);
2904
2905                // Search for a suitable place to put it
2906                //
2907                // SAFETY: Caller of this function ensures that the control bytes
2908                // are properly initialized.
2909                let new_i = guard.find_insert_slot(hash).index;
2910
2911                // Probing works by scanning through all of the control
2912                // bytes in groups, which may not be aligned to the group
2913                // size. If both the new and old position fall within the
2914                // same unaligned group, then there is no benefit in moving
2915                // it and we can just continue to the next item.
2916                if likely(guard.is_in_same_group(i, new_i, hash)) {
2917                    guard.set_ctrl_hash(i, hash);
2918                    continue 'outer;
2919                }
2920
2921                let new_i_p = guard.bucket_ptr(new_i, size_of);
2922
2923                // We are moving the current item to a new position. Write
2924                // our H2 to the control byte of the new position.
2925                let prev_ctrl = guard.replace_ctrl_hash(new_i, hash);
2926                if prev_ctrl == Tag::EMPTY {
2927                    guard.set_ctrl(i, Tag::EMPTY);
2928                    // If the target slot is empty, simply move the current
2929                    // element into the new slot and clear the old control
2930                    // byte.
2931                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
2932                    continue 'outer;
2933                } else {
2934                    // If the target slot is occupied, swap the two elements
2935                    // and then continue processing the element that we just
2936                    // swapped into the old slot.
2937                    debug_assert_eq!(prev_ctrl, Tag::DELETED);
2938                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
2939                    continue 'inner;
2940                }
2941            }
2942        }
2943
2944        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
2945
2946        mem::forget(guard);
2947    }
2948
2949    /// Deallocates the table without dropping any entries.
2950    ///
2951    /// # Note
2952    ///
2953    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
2954    /// otherwise it can lead to a memory leak. Calling this function also invalidates
2955    /// (leaves dangling) all [`Bucket`] instances as well as the `ctrl` field of the table.
2957    ///
2958    /// # Safety
2959    ///
2960    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
2961    ///
2962    /// * The [`RawTableInner`] has already been allocated;
2963    ///
2964    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2965    ///   to allocate this table.
2966    ///
2967    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
2968    ///   to allocate this table.
2969    ///
2970    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2971    ///
2972    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2973    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2974    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2975    #[inline]
2976    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
2977    where
2978        A: Allocator,
2979    {
2980        // SAFETY: The caller must uphold the safety contract for `free_buckets`
2981        // method.
2982        let (ptr, layout) = self.allocation_info(table_layout);
2983        alloc.deallocate(ptr, layout);
2984    }
2985
2986    /// Returns a pointer to the allocated memory and the layout that was used to
2987    /// allocate the table.
2988    ///
2989    /// # Safety
2990    ///
2991    /// The caller of this function must observe the following safety rules:
2992    ///
2993    /// * The [`RawTableInner`] has already been allocated, otherwise
2994    ///   calling this function results in [`undefined behavior`]
2995    ///
2996    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2997    ///   that was used to allocate this table. Failure to comply with this condition
2998    ///   may result in [`undefined behavior`].
2999    ///
3000    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3001    ///
3002    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3003    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3004    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
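    ///
    /// Layout sketch: the allocation begins `ctrl_offset` bytes before `self.ctrl`,
    /// where `ctrl_offset` comes from `TableLayout::calculate_layout_for`:
    ///
    /// ```none
    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n, CTa_0, ..., CTa_m
    /// ^ allocation start        ^ self.ctrl
    /// |<----- ctrl_offset ----->|
    /// ```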
3005    #[inline]
3006    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3007        debug_assert!(
3008            !self.is_empty_singleton(),
3009            "this function can only be called on non-empty tables"
3010        );
3011
3012        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
3013        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
3014            Some(lco) => lco,
3015            None => unsafe { hint::unreachable_unchecked() },
3016        };
3017        (
3018            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
3019            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
3020            layout,
3021        )
3022    }
3023
3024    /// Returns the total amount of memory allocated internally by the hash
3025    /// table, in bytes.
3026    ///
3027    /// The returned number is informational only. It is intended to be
3028    /// primarily used for memory profiling.
3029    ///
3030    /// # Safety
3031    ///
3032    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3033    /// that was used to allocate this table. Failure to comply with this condition
3034    /// may result in [`undefined behavior`].
3035    ///
3036    ///
3037    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3038    #[inline]
3039    unsafe fn allocation_size_or_zero(&self, table_layout: TableLayout) -> usize {
3040        if self.is_empty_singleton() {
3041            0
3042        } else {
3043            // SAFETY:
3044            // 1. We have checked that our table is allocated.
3045            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
3046            // that was used to allocate this table.
3047            unsafe { self.allocation_info(table_layout).1.size() }
3048        }
3049    }
3050
3051    /// Marks all table buckets as empty without dropping their contents.
3052    #[inline]
3053    fn clear_no_drop(&mut self) {
3054        if !self.is_empty_singleton() {
3055            self.ctrl_slice().fill_empty();
3056        }
3057        self.items = 0;
3058        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
3059    }
3060
3061    /// Erases the [`Bucket`]'s control byte at the given index so that the bucket is no
3062    /// longer reported as full, decreases the `items` count of the table and, if possible,
3063    /// increases `self.growth_left`.
3064    ///
3065    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
3066    /// does not make any changes to the `data` parts of the table. The caller of this
3067    /// function must take care to properly drop the `data`, otherwise calling this
3068    /// function may result in a memory leak.
3069    ///
3070    /// # Safety
3071    ///
3072    /// You must observe the following safety rules when calling this function:
3073    ///
3074    /// * The [`RawTableInner`] has already been allocated;
3075    ///
3076    /// * The control byte at the given `index` must be full;
3077    ///
3078    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
3079    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
3080    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
3081    ///
3082    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
3083    ///
3084    /// Calling this function on a table with no elements is unspecified, but calling subsequent
3085    /// functions is likely to result in [`undefined behavior`] due to subtraction overflow
3086    /// (`self.items -= 1` overflows when `self.items == 0`).
3087    ///
3088    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
3089    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
3090    ///
3091    /// [`RawTableInner::buckets`]: RawTableInner::buckets
3092    /// [`Bucket::as_ptr`]: Bucket::as_ptr
3093    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3094    #[inline]
3095    unsafe fn erase(&mut self, index: usize) {
3096        debug_assert!(self.is_bucket_full(index));
3097
3098        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
3099        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
3100        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
3101        // SAFETY:
3102        // - The caller must uphold the safety contract for `erase` method;
3103        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
3104        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
3105        let empty_after = Group::load(self.ctrl(index)).match_empty();
3106
3107        // Inserting and searching in the map is performed by two key functions:
3108        //
3109        // - The `find_insert_slot` function that looks up the index of any `Tag::EMPTY` or `Tag::DELETED`
3110        //   slot in a group to be able to insert. If it doesn't find a `Tag::EMPTY` or `Tag::DELETED`
3111        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
3112        //   and so on until it has gone through all the groups in the control bytes.
3113        //
3114        // - The `find_inner` function that looks for the index of the desired element by looking
3115        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
3116        //   there is no `Tag::EMPTY` byte in the group, then this means that the `find_insert_slot`
3117        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
3118        //   jumps further, and if it does not find the desired element and again there is no `Tag::EMPTY`
3119        //   byte, then it jumps further, and so on. The search stops only if `find_inner` function
3120        //   finds the desired element or hits a `Tag::EMPTY` slot/byte.
3121        //
3122        // Accordingly, this leads to two consequences:
3123        //
3124        // - The map must have `Tag::EMPTY` slots (bytes);
3125        //
3126        // - You can't just mark the byte to be erased as `Tag::EMPTY`, because otherwise the `find_inner`
3127        //   function may stumble upon a `Tag::EMPTY` byte before finding the desired element and stop
3128        //   searching.
3129        //
3130        // Thus it is necessary to check all bytes after and before the erased element. If we are in
3131        // a contiguous `Group` of `FULL` or `Tag::DELETED` bytes (the number of `FULL` or `Tag::DELETED` bytes
3132        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
3133        // `Tag::DELETED` in order for the `find_inner` function to go further. On the other hand, if there
3134        // is at least one `Tag::EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
3135        // upon a `Tag::EMPTY` byte, so we can safely mark our erased byte as `Tag::EMPTY` as well.
3136        //
3137        // Finally, for tables smaller than the group width (self.buckets() < Group::WIDTH) we have
3138        // `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`, so given
3139        // all of the above, such tables cannot have `Tag::DELETED` bytes.
3140        //
3141        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
3142        // `trailing_zeros` refers to the bytes at the beginning of a group.
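        //
        // For example, with `Group::WIDTH == 4`: if the 2 control bytes just before `index`
        // are non-EMPTY (`empty_before.leading_zeros() == 2`) and the 3 control bytes starting
        // at `index` are non-EMPTY (`empty_after.trailing_zeros() == 3`), then 2 + 3 >= 4, so
        // there is a window of 4 consecutive non-EMPTY control bytes covering `index`, and the
        // erased byte must become `Tag::DELETED`.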
3143        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3144            Tag::DELETED
3145        } else {
3146            self.growth_left += 1;
3147            Tag::EMPTY
3148        };
3149        // SAFETY: the caller must uphold the safety contract for `erase` method.
3150        self.set_ctrl(index, ctrl);
3151        self.items -= 1;
3152    }
3153}
3154
3155impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
3156    fn clone(&self) -> Self {
3157        if self.table.is_empty_singleton() {
3158            Self::new_in(self.alloc.clone())
3159        } else {
3160            unsafe {
3161                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3162                //
3163                // SAFETY: This is safe as we are taking the size of an already allocated table
3164                // and therefore capacity overflow cannot occur, `self.table.buckets()` is power
3165                // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3166                let mut new_table = match Self::new_uninitialized(
3167                    self.alloc.clone(),
3168                    self.table.buckets(),
3169                    Fallibility::Infallible,
3170                ) {
3171                    Ok(table) => table,
3172                    Err(_) => hint::unreachable_unchecked(),
3173                };
3174
3175                // Cloning elements may fail (the clone function may panic). But we don't
3176                // need to worry about uninitialized control bits, since:
3177                // 1. The number of items (elements) in the table is zero, which means that
3178                //    the control bits will not be read by the `Drop` function.
3179                // 2. The `clone_from_spec` method will first copy all control bits from
3180                //    `self` (thus initializing them). But this will not affect the `Drop`
3181                //    function, since the `clone_from_spec` function sets `items` only after
3182                //    successfully cloning all elements.
3183                new_table.clone_from_spec(self);
3184                new_table
3185            }
3186        }
3187    }
3188
3189    fn clone_from(&mut self, source: &Self) {
3190        if source.table.is_empty_singleton() {
3191            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3192            unsafe {
3193                // SAFETY:
3194                // 1. We call the function only once;
3195                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3196                //    and [`TableLayout`] that were used to allocate this table.
3197                // 3. If any elements' drop function panics, then there will only be a memory leak,
3198                //    because we have replaced the inner table with a new one.
3199                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3200            }
3201        } else {
3202            unsafe {
3203                // Make sure that if any panic occurs, we clear the table and
3204                // leave it in an empty state.
3205                let mut self_ = guard(self, |self_| {
3206                    self_.clear_no_drop();
3207                });
3208
3209                // First, drop all our elements without clearing the control
3210                // bytes. If this panics then the scope guard will clear the
3211                // table, leaking any elements that were not dropped yet.
3212                //
3213                // This leak is unavoidable: we can't try dropping more elements
3214                // since this could lead to another panic and abort the process.
3215                //
3216                // SAFETY: If something goes wrong we clear our table right after
3217                // dropping the elements, so there is no double drop, since `items`
3218                // will be equal to zero.
3219                self_.table.drop_elements::<T>();
3220
3221                // If necessary, resize our table to match the source.
3222                if self_.buckets() != source.buckets() {
3223                    let new_inner = match RawTableInner::new_uninitialized(
3224                        &self_.alloc,
3225                        Self::TABLE_LAYOUT,
3226                        source.buckets(),
3227                        Fallibility::Infallible,
3228                    ) {
3229                        Ok(table) => table,
3230                        Err(_) => hint::unreachable_unchecked(),
3231                    };
3232                    // Replace the old inner with the new uninitialized one. This is ok, since if something
3233                    // goes wrong the `ScopeGuard` will initialize all control bytes and leave an empty table.
3234                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3235                    if !old_inner.is_empty_singleton() {
3236                        // SAFETY:
3237                        // 1. We have checked that our table is allocated.
3238                        // 2. We know for sure that `alloc` and `table_layout` matches
3239                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3240                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3241                    }
3242                }
3243
3244                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3245                // inside the `clone_from_impl` function will take care of that, dropping all
3246                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3247                self_.clone_from_spec(source);
3248
3249                // Disarm the scope guard if cloning was successful.
3250                ScopeGuard::into_inner(self_);
3251            }
3252        }
3253    }
3254}
3255
3256/// Specialization of `clone_from` for `Copy` types
3257trait RawTableClone {
3258    unsafe fn clone_from_spec(&mut self, source: &Self);
3259}
3260impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3261    default_fn! {
3262        #[cfg_attr(feature = "inline-more", inline)]
3263        unsafe fn clone_from_spec(&mut self, source: &Self) {
3264            self.clone_from_impl(source);
3265        }
3266    }
3267}
3268#[cfg(feature = "nightly")]
3269impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3270    #[cfg_attr(feature = "inline-more", inline)]
3271    unsafe fn clone_from_spec(&mut self, source: &Self) {
3272        source
3273            .table
3274            .ctrl(0)
3275            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3276        source
3277            .data_start()
3278            .as_ptr()
3279            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3280
3281        self.table.items = source.table.items;
3282        self.table.growth_left = source.table.growth_left;
3283    }
3284}
3285
3286impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
3287    /// Common code for `clone` and `clone_from`. Assumes:
3288    /// - `self.buckets() == source.buckets()`.
3289    /// - Any existing elements have been dropped.
3290    /// - The control bytes are not initialized yet.
3291    #[cfg_attr(feature = "inline-more", inline)]
3292    unsafe fn clone_from_impl(&mut self, source: &Self) {
3293        // Copy the control bytes unchanged. We do this in a single pass
3294        source
3295            .table
3296            .ctrl(0)
3297            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3298
3299        // The cloning of elements may panic, in which case we need
3300        // to make sure we drop only the elements that have been
3301        // cloned so far.
3302        let mut guard = guard((0, &mut *self), |(index, self_)| {
3303            if T::NEEDS_DROP {
3304                for i in 0..*index {
3305                    if self_.is_bucket_full(i) {
3306                        self_.bucket(i).drop();
3307                    }
3308                }
3309            }
3310        });
3311
3312        for from in source.iter() {
3313            let index = source.bucket_index(&from);
3314            let to = guard.1.bucket(index);
3315            to.write(from.as_ref().clone());
3316
3317            // Update the index in case we need to unwind.
3318            guard.0 = index + 1;
3319        }
3320
3321        // Successfully cloned all items, no need to clean up.
3322        mem::forget(guard);
3323
3324        self.table.items = source.table.items;
3325        self.table.growth_left = source.table.growth_left;
3326    }
3327}
3328
3329impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3330    #[inline]
3331    fn default() -> Self {
3332        Self::new_in(Default::default())
3333    }
3334}
3335
3336#[cfg(feature = "nightly")]
3337unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3338    #[cfg_attr(feature = "inline-more", inline)]
3339    fn drop(&mut self) {
3340        unsafe {
3341            // SAFETY:
3342            // 1. We call the function only once;
3343            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3344            //    and [`TableLayout`] that were used to allocate this table.
3345            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3346            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3347            //    so there won't be any table left in an inconsistent state.
3348            self.table
3349                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3350        }
3351    }
3352}
3353#[cfg(not(feature = "nightly"))]
3354impl<T, A: Allocator> Drop for RawTable<T, A> {
3355    #[cfg_attr(feature = "inline-more", inline)]
3356    fn drop(&mut self) {
3357        unsafe {
3358            // SAFETY:
3359            // 1. We call the function only once;
3360            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361            //    and [`TableLayout`] that were used to allocate this table.
3362            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364            //    so there won't be any table left in an inconsistent state.
3365            self.table
3366                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367        }
3368    }
3369}
3370
3371impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3372    type Item = T;
3373    type IntoIter = RawIntoIter<T, A>;
3374
3375    #[cfg_attr(feature = "inline-more", inline)]
3376    fn into_iter(self) -> RawIntoIter<T, A> {
3377        unsafe {
3378            let iter = self.iter();
3379            self.into_iter_from(iter)
3380        }
3381    }
3382}
3383
3384/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3385/// not track an item count.
3386pub(crate) struct RawIterRange<T> {
3387    // Mask of full buckets in the current group. Bits are cleared from this
3388    // mask as each element is processed.
3389    current_group: BitMaskIter,
3390
3391    // Pointer to the buckets for the current group.
3392    data: Bucket<T>,
3393
3394    // Pointer to the next group of control bytes,
3395    // Must be aligned to the group size.
3396    next_ctrl: *const u8,
3397
3398    // Pointer one past the last control byte of this range.
3399    end: *const u8,
3400}
3401
3402impl<T> RawIterRange<T> {
3403    /// Returns a `RawIterRange` covering a subset of a table.
3404    ///
3405    /// # Safety
3406    ///
3407    /// If any of the following conditions are violated, the result is
3408    /// [`undefined behavior`]:
3409    ///
3410    /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`;
3411    ///
3412    /// * `ctrl` must be properly aligned to the group size (`Group::WIDTH`);
3413    ///
3414    /// * `ctrl` must point to the array of properly initialized control bytes;
3415    ///
3416    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3417    ///
3418    /// * The value of `len` must be less than or equal to the number of table buckets,
3419    ///   and the returned value of `ctrl.add(len).offset_from(ctrl)`
3420    ///   must be positive.
3421    ///
3422    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3423    ///   byte past the end of the same [allocated table].
3424    ///
3425    /// * The `len` must be a power of two.
3426    ///
3427    /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
3428    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3429    #[cfg_attr(feature = "inline-more", inline)]
3430    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431        debug_assert_ne!(len, 0);
3432        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434        let end = ctrl.add(len);
3435
3436        // Load the first group and advance ctrl to point to the next group
3437        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439        let next_ctrl = ctrl.add(Group::WIDTH);
3440
3441        Self {
3442            current_group: current_group.into_iter(),
3443            data,
3444            next_ctrl,
3445            end,
3446        }
3447    }
3448
3449    /// Splits a `RawIterRange` into two halves.
3450    ///
3451    /// Returns `None` if the remaining range is smaller than or equal to the
3452    /// group width.
3453    #[cfg_attr(feature = "inline-more", inline)]
3454    #[cfg(feature = "rayon")]
3455    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3456        unsafe {
3457            if self.end <= self.next_ctrl {
3458                // Nothing to split if the group that we are currently processing
3459                // is the last one.
3460                (self, None)
3461            } else {
3462                // len is the remaining number of elements after the group that
3463                // we are currently processing. It must be a multiple of the
3464                // group size (small tables are caught by the check above).
3465                let len = offset_from(self.end, self.next_ctrl);
3466                debug_assert_eq!(len % Group::WIDTH, 0);
3467
3468                // Split the remaining elements into two halves, but round the
3469                // midpoint down in case there is an odd number of groups
3470                // remaining. This ensures that:
3471                // - The tail is at least 1 group long.
3472                // - The split is roughly even considering we still have the
3473                //   current group to process.
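                //
                // For example, with `Group::WIDTH == 16` and `len == 48` (three
                // groups remaining), `mid == (48 / 2) & !15 == 16`: the head keeps
                // one more group (plus the group currently being processed) and
                // the tail receives the remaining two groups.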
3474                let mid = (len / 2) & !(Group::WIDTH - 1);
3475
3476                let tail = Self::new(
3477                    self.next_ctrl.add(mid),
3478                    self.data.next_n(Group::WIDTH).next_n(mid),
3479                    len - mid,
3480                );
3481                debug_assert_eq!(
3482                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3483                    tail.data.ptr
3484                );
3485                debug_assert_eq!(self.end, tail.end);
3486                self.end = self.next_ctrl.add(mid);
3487                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3488                (self, Some(tail))
3489            }
3490        }
3491    }
3492
3493    /// # Safety
3494    /// If `DO_CHECK_PTR_RANGE` is false, the caller must ensure that we never try to iterate
3495    /// after yielding all elements.
3496    #[cfg_attr(feature = "inline-more", inline)]
3497    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498        loop {
3499            if let Some(index) = self.current_group.next() {
3500                return Some(self.data.next_n(index));
3501            }
3502
3503            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504                return None;
3505            }
3506
3507            // We might read past self.end up to the next group boundary,
3508            // but this is fine because it only occurs on tables smaller
3509            // than the group size where the trailing control bytes are all
3510            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511            // to the group size (since tables are power-of-two sized).
3512            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513                .match_full()
3514                .into_iter();
3515            self.data = self.data.next_n(Group::WIDTH);
3516            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517        }
3518    }
3519
3520    /// Folds every element into an accumulator by applying an operation,
3521    /// returning the final result.
3522    ///
3523    /// `fold_impl()` takes three arguments: the number of items remaining in
3524    /// the iterator, an initial value, and a closure with two arguments: an
3525    /// 'accumulator', and an element. The closure returns the value that the
3526    /// accumulator should have for the next iteration.
3527    ///
3528    /// The initial value is the value the accumulator will have on the first call.
3529    ///
3530    /// After applying this closure to every element of the iterator, `fold_impl()`
3531    /// returns the accumulator.
3532    ///
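    /// A hedged sketch of the intended use (illustrative only; `range` is
    /// assumed to be a `RawIterRange` over a table that holds exactly `n`
    /// items and is still alive):
    ///
    /// ```ignore
    /// // SAFETY: `n` matches the number of items and the table outlives `range`.
    /// let count = unsafe { range.fold_impl(n, 0usize, |acc, _bucket| acc + 1) };
    /// assert_eq!(count, n);
    /// ```
    ///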
3533    /// # Safety
3534    ///
3535    /// If any of the following conditions are violated, the result is
3536    /// [`Undefined Behavior`]:
3537    ///
3538    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3539    ///   i.e. table outlives the `RawIterRange`;
3540    ///
3541    /// * The provided `n` value must match the actual number of items
3542    ///   in the table.
3543    ///
3544    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3545    #[allow(clippy::while_let_on_iterator)]
3546    #[cfg_attr(feature = "inline-more", inline)]
3547    unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
3548    where
3549        F: FnMut(B, Bucket<T>) -> B,
3550    {
3551        loop {
3552            while let Some(index) = self.current_group.next() {
3553                // The returned `index` will always be in the range `0..Group::WIDTH`,
3554                // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
3555                debug_assert!(n != 0);
3556                let bucket = self.data.next_n(index);
3557                acc = f(acc, bucket);
3558                n -= 1;
3559            }
3560
3561            if n == 0 {
3562                return acc;
3563            }
3564
3565            // SAFETY: The caller of this function ensures that:
3566            //
3567            // 1. The provided `n` value matches the actual number of items in the table;
3568            // 2. The table is alive and has not been moved.
3569            //
3570            // Taking the above into account, we always stay within the bounds, because:
3571            //
3572            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3573            //    we will never end up in the given branch, since we should have already
3574            //    yielded all the elements of the table.
3575            //
3576            // 2. For tables larger than the group width, the number of buckets is a
3577            //    power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k).
3578            //    Since `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As
3579            //    we start from the start of the array of control bytes, and never try to
3580            //    iterate after getting all the elements, the last `self.current_group`
3581            //    will read bytes from the `self.buckets() - Group::WIDTH` index. We also
3582            //    know that `self.current_group.next()` will always return indices within
3583            //    the range `0..Group::WIDTH`.
3584            //
3585            //    Knowing all of the above and taking into account that we are synchronizing
3586            //    the `self.data` index with the index we used to read the `self.current_group`,
3587            //    the subsequent `self.data.next_n(index)` will always return a bucket with
3588            //    an index number less than `self.buckets()`.
3589            //
3590            //    The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
3591            //    actually be read, since we should have already yielded all the elements of
3592            //    the table.
3593            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3594                .match_full()
3595                .into_iter();
3596            self.data = self.data.next_n(Group::WIDTH);
3597            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3598        }
3599    }
3600}
3601
3602// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3603// in the actual iterator implementations determine the real Send/Sync bounds.
3604unsafe impl<T> Send for RawIterRange<T> {}
3605unsafe impl<T> Sync for RawIterRange<T> {}
3606
3607impl<T> Clone for RawIterRange<T> {
3608    #[cfg_attr(feature = "inline-more", inline)]
3609    fn clone(&self) -> Self {
3610        Self {
3611            data: self.data.clone(),
3612            next_ctrl: self.next_ctrl,
3613            current_group: self.current_group.clone(),
3614            end: self.end,
3615        }
3616    }
3617}
3618
3619impl<T> Iterator for RawIterRange<T> {
3620    type Item = Bucket<T>;
3621
3622    #[cfg_attr(feature = "inline-more", inline)]
3623    fn next(&mut self) -> Option<Bucket<T>> {
3624        unsafe {
3625            // SAFETY: `DO_CHECK_PTR_RANGE` is set to true, so `next_impl` itself checks the pointer range.
3626            self.next_impl::<true>()
3627        }
3628    }
3629
3630    #[inline]
3631    fn size_hint(&self) -> (usize, Option<usize>) {
3632        // We don't have an item count, so just guess based on the range size.
3633        let remaining_buckets = if self.end > self.next_ctrl {
3634            unsafe { offset_from(self.end, self.next_ctrl) }
3635        } else {
3636            0
3637        };
3638
3639        // Add a group width to include the group we are currently processing.
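        // For example, immediately after `new` over a 64-byte control range
        // with `Group::WIDTH == 16`, `remaining_buckets` is 48 and the hint
        // is `(0, Some(64))`.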
3640        (0, Some(Group::WIDTH + remaining_buckets))
3641    }
3642}
3643
3644impl<T> FusedIterator for RawIterRange<T> {}
3645
3646/// Iterator which returns a raw pointer to every full bucket in the table.
3647///
3648/// For maximum flexibility this iterator is not bound by a lifetime, but you
3649/// must observe several rules when using it:
3650/// - You must not free the hash table while iterating (including via growing/shrinking).
3651/// - It is fine to erase a bucket that has been yielded by the iterator.
3652/// - Erasing a bucket that has not yet been yielded by the iterator may still
3653///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3654/// - It is unspecified whether an element inserted after the iterator was
3655///   created will be yielded by that iterator (unless `reflect_insert` is called).
3656/// - The order in which the iterator yields buckets is unspecified and may
3657///   change in the future.
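///
/// A hedged sketch of typical usage (illustrative only, not compiled as a
/// doctest; `table` is assumed to be a mutable `RawTable<u64>` that outlives
/// the iterator):
///
/// ```ignore
/// // SAFETY: the table is not freed, grown, or shrunk while the iterator is
/// // live; erasing a bucket that has already been yielded is allowed.
/// unsafe {
///     for bucket in table.iter() {
///         if *bucket.as_ref() % 2 == 0 {
///             table.erase(bucket);
///         }
///     }
/// }
/// ```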
3658pub struct RawIter<T> {
3659    pub(crate) iter: RawIterRange<T>,
3660    items: usize,
3661}
3662
3663impl<T> RawIter<T> {
3664    unsafe fn drop_elements(&mut self) {
3665        if T::NEEDS_DROP && self.items != 0 {
3666            for item in self {
3667                item.drop();
3668            }
3669        }
3670    }
3671}
3672
3673impl<T> Clone for RawIter<T> {
3674    #[cfg_attr(feature = "inline-more", inline)]
3675    fn clone(&self) -> Self {
3676        Self {
3677            iter: self.iter.clone(),
3678            items: self.items,
3679        }
3680    }
3681}
3682impl<T> Default for RawIter<T> {
3683    #[cfg_attr(feature = "inline-more", inline)]
3684    fn default() -> Self {
3685        // SAFETY: Because the table is static, it always outlives the iter.
3686        unsafe { RawTableInner::NEW.iter() }
3687    }
3688}
3689
3690impl<T> Iterator for RawIter<T> {
3691    type Item = Bucket<T>;
3692
3693    #[cfg_attr(feature = "inline-more", inline)]
3694    fn next(&mut self) -> Option<Bucket<T>> {
3695        // The inner iterator iterates over buckets, so it can do unnecessary
3696        // work if we have already yielded all items.
3697        if self.items == 0 {
3698            return None;
3699        }
3700
3701        let nxt = unsafe {
3702            // SAFETY: We check the number of items to yield using the `items` field.
3703            self.iter.next_impl::<false>()
3704        };
3705
3706        debug_assert!(nxt.is_some());
3707        self.items -= 1;
3708
3709        nxt
3710    }
3711
3712    #[inline]
3713    fn size_hint(&self) -> (usize, Option<usize>) {
3714        (self.items, Some(self.items))
3715    }
3716
3717    #[inline]
3718    fn fold<B, F>(self, init: B, f: F) -> B
3719    where
3720        Self: Sized,
3721        F: FnMut(B, Self::Item) -> B,
3722    {
3723        unsafe { self.iter.fold_impl(self.items, init, f) }
3724    }
3725}
3726
3727impl<T> ExactSizeIterator for RawIter<T> {}
3728impl<T> FusedIterator for RawIter<T> {}
3729
3730/// Iterator which returns an index of every full bucket in the table.
3731///
3732/// For maximum flexibility this iterator is not bound by a lifetime, but you
3733/// must observe several rules when using it:
3734/// - You must not free the hash table while iterating (including via growing/shrinking).
3735/// - It is fine to erase a bucket that has been yielded by the iterator.
3736/// - Erasing a bucket that has not yet been yielded by the iterator may still
3737///   result in the iterator yielding index of that bucket.
3738/// - It is unspecified whether an element inserted after the iterator was
3739///   created will be yielded by that iterator.
3740/// - The order in which the iterator yields indices of the buckets is unspecified
3741///   and may change in the future.
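///
/// A hedged sketch of the intended use (illustrative only; the iterator is
/// constructed internally from the table's control bytes, and `table` is
/// assumed to outlive it):
///
/// ```ignore
/// for index in full_buckets_indices {
///     // SAFETY: `index` refers to a full bucket and the table is still alive.
///     let bucket = unsafe { table.bucket(index) };
/// }
/// ```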
3742pub(crate) struct FullBucketsIndices {
3743    // Mask of full buckets in the current group. Bits are cleared from this
3744    // mask as each element is processed.
3745    current_group: BitMaskIter,
3746
3747    // Index of the first control byte of the current group (relative
3748    // to the start of the control bytes).
3749    group_first_index: usize,
3750
3751    // Pointer to the current group of control bytes.
3752    // Must be aligned to the group size (Group::WIDTH).
3753    ctrl: NonNull<u8>,
3754
3755    // Number of elements in the table.
3756    items: usize,
3757}
3758
3759impl FullBucketsIndices {
3760    /// Advances the iterator and returns the next value.
3761    ///
3762    /// # Safety
3763    ///
3764    /// If any of the following conditions are violated, the result is
3765    /// [`Undefined Behavior`]:
3766    ///
3767    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3768    ///   i.e. table outlives the `FullBucketsIndices`;
3769    ///
3770    /// * The caller never tries to iterate after all the elements have been yielded.
3771    ///
3772    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3773    #[inline(always)]
3774    unsafe fn next_impl(&mut self) -> Option<usize> {
3775        loop {
3776            if let Some(index) = self.current_group.next() {
3777                // The returned `self.group_first_index + index` will always
3778                // be in the range `0..self.buckets()`. See explanation below.
3779                return Some(self.group_first_index + index);
3780            }
3781
3782            // SAFETY: The caller of this function ensures that:
3783            //
3784            // 1. It never tries to iterate after getting all the elements;
3785            // 2. The table is alive and has not been moved;
3786            // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
3787            //
3788            // Taking the above into account, we always stay within the bounds, because:
3789            //
3790            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3791            //    we will never end up in the given branch, since we should have already
3792            //    yielded all the elements of the table.
3793            //
3794            // 2. For tables larger than the group width, the number of buckets is a
3795            //    power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k).
3796            //    Since `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As
3797            //    we start from the start of the array of control bytes, and never try to
3798            //    iterate after getting all the elements, the last `self.ctrl` will be
3799            //    equal to `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
3800            //    will always contain indices within the range `0..Group::WIDTH`,
3801            //    and the subsequent `self.group_first_index + index` will always return a
3802            //    number less than `self.buckets()`.
3803            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
3804
3805            // SAFETY: See explanation above.
3806            self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast())
3807                .match_full()
3808                .into_iter();
3809            self.group_first_index += Group::WIDTH;
3810        }
3811    }
3812}
3813
3814impl Iterator for FullBucketsIndices {
3815    type Item = usize;
3816
3817    /// Advances the iterator and returns the next value. It is up to
3818    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
3819    /// because we cannot make the `next` method unsafe.
3820    #[inline(always)]
3821    fn next(&mut self) -> Option<usize> {
3822        // Return `None` if we have already yielded all items.
3823        if self.items == 0 {
3824            return None;
3825        }
3826
3827        let nxt = unsafe {
3828            // SAFETY:
3829            // 1. We check number of items to yield using `items` field.
3830            // 2. The caller ensures that the table is alive and has not moved.
3831            self.next_impl()
3832        };
3833
3834        debug_assert!(nxt.is_some());
3835        self.items -= 1;
3836
3837        nxt
3838    }
3839
3840    #[inline(always)]
3841    fn size_hint(&self) -> (usize, Option<usize>) {
3842        (self.items, Some(self.items))
3843    }
3844}
3845
3846impl ExactSizeIterator for FullBucketsIndices {}
3847impl FusedIterator for FullBucketsIndices {}
3848
3849/// Iterator which consumes a table and returns elements.
3850pub struct RawIntoIter<T, A: Allocator = Global> {
3851    iter: RawIter<T>,
3852    allocation: Option<(NonNull<u8>, Layout, A)>,
3853    marker: PhantomData<T>,
3854}
3855
3856impl<T, A: Allocator> RawIntoIter<T, A> {
3857    #[cfg_attr(feature = "inline-more", inline)]
3858    pub fn iter(&self) -> RawIter<T> {
3859        self.iter.clone()
3860    }
3861}
3862
3863unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
3864where
3865    T: Send,
3866    A: Send,
3867{
3868}
3869unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
3870where
3871    T: Sync,
3872    A: Sync,
3873{
3874}
3875
3876#[cfg(feature = "nightly")]
3877unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
3878    #[cfg_attr(feature = "inline-more", inline)]
3879    fn drop(&mut self) {
3880        unsafe {
3881            // Drop all remaining elements
3882            self.iter.drop_elements();
3883
3884            // Free the table
3885            if let Some((ptr, layout, ref alloc)) = self.allocation {
3886                alloc.deallocate(ptr, layout);
3887            }
3888        }
3889    }
3890}
3891#[cfg(not(feature = "nightly"))]
3892impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
3893    #[cfg_attr(feature = "inline-more", inline)]
3894    fn drop(&mut self) {
3895        unsafe {
3896            // Drop all remaining elements
3897            self.iter.drop_elements();
3898
3899            // Free the table
3900            if let Some((ptr, layout, ref alloc)) = self.allocation {
3901                alloc.deallocate(ptr, layout);
3902            }
3903        }
3904    }
3905}
3906
3907impl<T, A: Allocator> Default for RawIntoIter<T, A> {
3908    fn default() -> Self {
3909        Self {
3910            iter: Default::default(),
3911            allocation: None,
3912            marker: PhantomData,
3913        }
3914    }
3915}
3916impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
3917    type Item = T;
3918
3919    #[cfg_attr(feature = "inline-more", inline)]
3920    fn next(&mut self) -> Option<T> {
3921        unsafe { Some(self.iter.next()?.read()) }
3922    }
3923
3924    #[inline]
3925    fn size_hint(&self) -> (usize, Option<usize>) {
3926        self.iter.size_hint()
3927    }
3928}
3929
3930impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
3931impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
3932
3933/// Iterator which consumes elements without freeing the table storage.
3934pub struct RawDrain<'a, T, A: Allocator = Global> {
3935    iter: RawIter<T>,
3936
3937    // The table is moved into the iterator for the duration of the drain. This
3938    // ensures that an empty table is left if the drain iterator is leaked
3939    // without dropping.
3940    table: RawTableInner,
3941    orig_table: NonNull<RawTableInner>,
3942
3943    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
3944    // covariant over T.
3945    marker: PhantomData<&'a RawTable<T, A>>,
3946}
3947
3948impl<T, A: Allocator> RawDrain<'_, T, A> {
3949    #[cfg_attr(feature = "inline-more", inline)]
3950    pub fn iter(&self) -> RawIter<T> {
3951        self.iter.clone()
3952    }
3953}
3954
3955unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
3956where
3957    T: Send,
3958    A: Send,
3959{
3960}
3961unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
3962where
3963    T: Sync,
3964    A: Sync,
3965{
3966}
3967
3968impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
3969    #[cfg_attr(feature = "inline-more", inline)]
3970    fn drop(&mut self) {
3971        unsafe {
3972            // Drop all remaining elements. Note that this may panic.
3973            self.iter.drop_elements();
3974
3975            // Reset the contents of the table now that all elements have been
3976            // dropped.
3977            self.table.clear_no_drop();
3978
3979            // Move the now empty table back to its original location.
3980            self.orig_table
3981                .as_ptr()
3982                .copy_from_nonoverlapping(&self.table, 1);
3983        }
3984    }
3985}
3986
3987impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
3988    type Item = T;
3989
3990    #[cfg_attr(feature = "inline-more", inline)]
3991    fn next(&mut self) -> Option<T> {
3992        unsafe {
3993            let item = self.iter.next()?;
3994            Some(item.read())
3995        }
3996    }
3997
3998    #[inline]
3999    fn size_hint(&self) -> (usize, Option<usize>) {
4000        self.iter.size_hint()
4001    }
4002}
4003
4004impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
4005impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
4006
4007/// Iterator over occupied buckets that could match a given hash.
4008///
4009/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
4010/// items that have a hash value different from the one provided. You should
4011/// always validate the returned values before using them.
4012///
4013/// For maximum flexibility this iterator is not bound by a lifetime, but you
4014/// must observe several rules when using it:
4015/// - You must not free the hash table while iterating (including via growing/shrinking).
4016/// - It is fine to erase a bucket that has been yielded by the iterator.
4017/// - Erasing a bucket that has not yet been yielded by the iterator may still
4018///   result in the iterator yielding that bucket.
4019/// - It is unspecified whether an element inserted after the iterator was
4020///   created will be yielded by that iterator.
4021/// - The order in which the iterator yields buckets is unspecified and may
4022///   change in the future.
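///
/// A hedged sketch of validating the candidates (illustrative only; `hash` is
/// the full hash of the key being looked up and `is_match` is a hypothetical
/// caller-supplied equality check):
///
/// ```ignore
/// // SAFETY: the table is not freed, grown, or shrunk while iterating.
/// unsafe {
///     for bucket in table.iter_hash(hash) {
///         // Only the top 7 bits of the hash are stored, so a full equality
///         // check is still required before using the bucket.
///         if is_match(bucket.as_ref()) {
///             // found a genuine match
///         }
///     }
/// }
/// ```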
4023pub struct RawIterHash<T> {
4024    inner: RawIterHashInner,
4025    _marker: PhantomData<T>,
4026}
4027
4028#[derive(Clone)]
4029struct RawIterHashInner {
4030    // See `RawTableInner`'s corresponding fields for details.
4031    // We can't store a `*const RawTableInner` as it would get
4032    // invalidated by the user calling `&mut` methods on `RawTable`.
4033    bucket_mask: usize,
4034    ctrl: NonNull<u8>,
4035
4036    // The top 7 bits of the hash.
4037    tag_hash: Tag,
4038
4039    // The sequence of groups to probe in the search.
4040    probe_seq: ProbeSeq,
4041
4042    group: Group,
4043
4044    // The elements within the group with a matching tag-hash.
4045    bitmask: BitMaskIter,
4046}
4047
4048impl<T> RawIterHash<T> {
4049    #[cfg_attr(feature = "inline-more", inline)]
4050    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
4051        RawIterHash {
4052            inner: RawIterHashInner::new(&table.table, hash),
4053            _marker: PhantomData,
4054        }
4055    }
4056}
4057
4058impl<T> Clone for RawIterHash<T> {
4059    #[cfg_attr(feature = "inline-more", inline)]
4060    fn clone(&self) -> Self {
4061        Self {
4062            inner: self.inner.clone(),
4063            _marker: PhantomData,
4064        }
4065    }
4066}
4067
4068impl<T> Default for RawIterHash<T> {
4069    #[cfg_attr(feature = "inline-more", inline)]
4070    fn default() -> Self {
4071        Self {
4072            // SAFETY: Because the table is static, it always outlives the iter.
4073            inner: unsafe { RawIterHashInner::new(&RawTableInner::NEW, 0) },
4074            _marker: PhantomData,
4075        }
4076    }
4077}
4078
4079impl RawIterHashInner {
4080    #[cfg_attr(feature = "inline-more", inline)]
4081    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
4082        let tag_hash = Tag::full(hash);
4083        let probe_seq = table.probe_seq(hash);
4084        let group = Group::load(table.ctrl(probe_seq.pos));
4085        let bitmask = group.match_tag(tag_hash).into_iter();
4086
4087        RawIterHashInner {
4088            bucket_mask: table.bucket_mask,
4089            ctrl: table.ctrl,
4090            tag_hash,
4091            probe_seq,
4092            group,
4093            bitmask,
4094        }
4095    }
4096}
4097
4098impl<T> Iterator for RawIterHash<T> {
4099    type Item = Bucket<T>;
4100
4101    fn next(&mut self) -> Option<Bucket<T>> {
4102        unsafe {
4103            match self.inner.next() {
4104                Some(index) => {
4105                    // Can't use `RawTable::bucket` here as we don't have
4106                    // an actual `RawTable` reference to use.
4107                    debug_assert!(index <= self.inner.bucket_mask);
4108                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4109                    Some(bucket)
4110                }
4111                None => None,
4112            }
4113        }
4114    }
4115}
4116
4117impl Iterator for RawIterHashInner {
4118    type Item = usize;
4119
4120    fn next(&mut self) -> Option<Self::Item> {
4121        unsafe {
4122            loop {
4123                if let Some(bit) = self.bitmask.next() {
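                    // `bit` is the offset within the probed group; masking with
                    // `bucket_mask` wraps indices that fall into the cloned
                    // trailing control bytes back into the real bucket range.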
4124                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4125                    return Some(index);
4126                }
4127                if likely(self.group.match_empty().any_bit_set()) {
4128                    return None;
4129                }
4130                self.probe_seq.move_next(self.bucket_mask);
4131
4132                // Can't use `RawTableInner::ctrl` here as we don't have
4133                // an actual `RawTableInner` reference to use.
4134                let index = self.probe_seq.pos;
4135                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4136                let group_ctrl = self.ctrl.as_ptr().add(index).cast();
4137
4138                self.group = Group::load(group_ctrl);
4139                self.bitmask = self.group.match_tag(self.tag_hash).into_iter();
4140            }
4141        }
4142    }
4143}
4144
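/// Iterator wrapper which removes and returns the elements for which the
/// supplied predicate returns `true`, leaving all other elements in the table.
///
/// A hedged sketch of how `next` is typically driven (illustrative only;
/// `extract_if` is assumed to be a `RawExtractIf` built from the table's raw
/// iterator and a mutable borrow of that table, and `should_remove` is a
/// hypothetical caller-supplied predicate):
///
/// ```ignore
/// while let Some(value) = extract_if.next(|item| should_remove(item)) {
///     // `value` has already been removed from the table at this point.
///     drop(value);
/// }
/// ```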
4145pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
4146    pub iter: RawIter<T>,
4147    pub table: &'a mut RawTable<T, A>,
4148}
4149
4150impl<T, A: Allocator> RawExtractIf<'_, T, A> {
4151    #[cfg_attr(feature = "inline-more", inline)]
4152    pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
4153    where
4154        F: FnMut(&mut T) -> bool,
4155    {
4156        unsafe {
4157            for item in &mut self.iter {
4158                if f(item.as_mut()) {
4159                    return Some(self.table.remove(item).0);
4160                }
4161            }
4162        }
4163        None
4164    }
4165}
4166
4167#[cfg(test)]
4168mod test_map {
4169    use super::*;
4170
4171    #[test]
4172    fn test_minimum_capacity_for_small_types() {
4173        #[track_caller]
4174        fn test_t<T>() {
4175            let raw_table: RawTable<T> = RawTable::with_capacity(1);
4176            let actual_buckets = raw_table.buckets();
4177            let min_buckets = Group::WIDTH / core::mem::size_of::<T>();
4178            assert!(
4179                actual_buckets >= min_buckets,
4180                "expected at least {min_buckets} buckets, got {actual_buckets} buckets"
4181            );
4182        }
4183
4184        test_t::<u8>();
4185
4186        // This is only "small" for some platforms, like x86_64 with SSE2, but
4187        // there's no harm in running it on other platforms.
4188        test_t::<u16>();
4189    }
4190
4191    fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
4192        unsafe {
4193            table.table.rehash_in_place(
4194                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
4195                mem::size_of::<T>(),
4196                if mem::needs_drop::<T>() {
4197                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
4198                } else {
4199                    None
4200                },
4201            );
4202        }
4203    }
4204
4205    #[test]
4206    fn rehash() {
4207        let mut table = RawTable::new();
4208        let hasher = |i: &u64| *i;
4209        for i in 0..100 {
4210            table.insert(i, i, hasher);
4211        }
4212
4213        for i in 0..100 {
4214            unsafe {
4215                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4216            }
4217            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4218        }
4219
4220        rehash_in_place(&mut table, hasher);
4221
4222        for i in 0..100 {
4223            unsafe {
4224                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4225            }
4226            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4227        }
4228    }
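
    /// A minimal additional check (a hedged sketch added for illustration, not
    /// part of the original suite): `RawIter` should yield exactly `len` items
    /// and report an exact `size_hint`.
    #[test]
    fn iter_yields_exact_item_count() {
        let mut table = RawTable::new();
        let hasher = |i: &u64| *i;
        for i in 0..50u64 {
            table.insert(i, i, hasher);
        }

        // SAFETY: the table outlives the iterator and is not resized while the
        // iterator is live.
        let iter = unsafe { table.iter() };
        assert_eq!(iter.size_hint(), (50, Some(50)));
        assert_eq!(iter.count(), 50);
    }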
4229
4230    /// Checks that we are not trying to read the memory of an
4231    /// uninitialized table during drop.
4232    #[test]
4233    fn test_drop_uninitialized() {
4234        use ::alloc::vec::Vec;
4235
4236        let table = unsafe {
4237            // SAFETY: `buckets` is a power of two and we're not
4238            // trying to actually use the returned RawTable.
4239            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4240                .unwrap()
4241        };
4242        drop(table);
4243    }
4244
4245    /// Checks that we don't try to drop data if `items` is zero,
4246    /// even if there are `FULL` control bytes.
4247    #[test]
4248    fn test_drop_zero_items() {
4249        use ::alloc::vec::Vec;
4250        unsafe {
4251            // SAFETY: `buckets` is a power of two and we're not
4252            // trying to actually use the returned RawTable.
4253            let mut table =
4254                RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4255                    .unwrap();
4256
4257            // Simulate a full table: all control bytes marked full, but `items` is zero.
4258
4259            // SAFETY: We checked that the table is allocated and therefore the table already has
4260            // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
4261            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4262            table.table.ctrl_slice().fill_empty();
4263
4264            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4265            table.table.ctrl(0).write_bytes(0, table.capacity());
4266
4267            // Fix up the trailing control bytes. See the comments in set_ctrl
4268            // for the handling of tables smaller than the group width.
4269            if table.buckets() < Group::WIDTH {
4270                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
4271                // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
4272                // `Group::WIDTH` is safe.
4273                table
4274                    .table
4275                    .ctrl(0)
4276                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4277            } else {
4278                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
4279                // so copying `Group::WIDTH` bytes with offset equal to
4280                // `self.buckets() == self.bucket_mask + 1` is safe.
4281                table
4282                    .table
4283                    .ctrl(0)
4284                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4285            }
4286            drop(table);
4287        }
4288    }
4289
4290    /// Checks that a panic raised while cloning elements in `clone_from` does not
4291    /// leak memory or double-drop elements, and that the destination table is left empty.
4292    #[test]
4293    fn test_catch_panic_clone_from() {
4294        use super::{AllocError, Allocator, Global};
4295        use ::alloc::sync::Arc;
4296        use ::alloc::vec::Vec;
4297        use core::sync::atomic::{AtomicI8, Ordering};
4298        use std::thread;
4299
4300        struct MyAllocInner {
4301            drop_count: Arc<AtomicI8>,
4302        }
4303
4304        #[derive(Clone)]
4305        struct MyAlloc {
4306            _inner: Arc<MyAllocInner>,
4307        }
4308
4309        impl Drop for MyAllocInner {
4310            fn drop(&mut self) {
4311                println!("MyAlloc freed.");
4312                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4313            }
4314        }
4315
4316        unsafe impl Allocator for MyAlloc {
4317            fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
4318                let g = Global;
4319                g.allocate(layout)
4320            }
4321
4322            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4323                let g = Global;
4324                g.deallocate(ptr, layout)
4325            }
4326        }
4327
4328        const DISARMED: bool = false;
4329        const ARMED: bool = true;
4330
4331        struct CheckedCloneDrop {
4332            panic_in_clone: bool,
4333            dropped: bool,
4334            need_drop: Vec<u64>,
4335        }
4336
4337        impl Clone for CheckedCloneDrop {
4338            fn clone(&self) -> Self {
4339                if self.panic_in_clone {
4340                    panic!("panic in clone")
4341                }
4342                Self {
4343                    panic_in_clone: self.panic_in_clone,
4344                    dropped: self.dropped,
4345                    need_drop: self.need_drop.clone(),
4346                }
4347            }
4348        }
4349
4350        impl Drop for CheckedCloneDrop {
4351            fn drop(&mut self) {
4352                if self.dropped {
4353                    panic!("double drop");
4354                }
4355                self.dropped = true;
4356            }
4357        }
4358
4359        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4360
4361        let mut table = RawTable::new_in(MyAlloc {
4362            _inner: Arc::new(MyAllocInner {
4363                drop_count: dropped.clone(),
4364            }),
4365        });
4366
4367        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4368            let idx = idx as u64;
4369            table.insert(
4370                idx,
4371                (
4372                    idx,
4373                    CheckedCloneDrop {
4374                        panic_in_clone,
4375                        dropped: false,
4376                        need_drop: vec![idx],
4377                    },
4378                ),
4379                |(k, _)| *k,
4380            );
4381        }
4382
4383        assert_eq!(table.len(), 7);
4384
4385        thread::scope(|s| {
4386            let result = s.spawn(|| {
4387                let armed_flags = [
4388                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4389                ];
4390                let mut scope_table = RawTable::new_in(MyAlloc {
4391                    _inner: Arc::new(MyAllocInner {
4392                        drop_count: dropped.clone(),
4393                    }),
4394                });
4395                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4396                    let idx = idx as u64;
4397                    scope_table.insert(
4398                        idx,
4399                        (
4400                            idx,
4401                            CheckedCloneDrop {
4402                                panic_in_clone,
4403                                dropped: false,
4404                                need_drop: vec![idx + 100],
4405                            },
4406                        ),
4407                        |(k, _)| *k,
4408                    );
4409                }
4410                table.clone_from(&scope_table);
4411            });
4412            assert!(result.join().is_err());
4413        });
4414
4415        // Let's check that all iterators work fine and do not return elements
4416        // (especially `RawIterRange`, which does not depend on the number of
4417        // elements in the table, but looks directly at the control bytes)
4418        //
4419        // SAFETY: We know for sure that `RawTable` will outlive
4420        // the returned `RawIter / RawIterRange` iterator.
4421        assert_eq!(table.len(), 0);
4422        assert_eq!(unsafe { table.iter().count() }, 0);
4423        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4424
4425        for idx in 0..table.buckets() {
4426            let idx = idx as u64;
4427            assert!(
4428                table.find(idx, |(k, _)| *k == idx).is_none(),
4429                "Index: {idx}"
4430            );
4431        }
4432
4433        // Exactly one allocator should remain alive: the one still owned by `table`.
4434        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4435    }
4436}