//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods here operate on precomputed `HashValue`s and never touch the
//! hash builder `S`, so the core can be tested and maintained separately from
//! the trait implementations that do depend on it.

mod entry;

pub mod raw_entry_v1;

use hashbrown::hash_table;

use crate::vec::{self, Vec};
use crate::TryReserveError;
use core::mem;
use core::ops::RangeBounds;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};

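/// Core of the map that does not depend on the hash builder `S`.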
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: Indices,
    /// entries is a dense vec of key-value pairs in their order.
    entries: Entries<K, V>,
}

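/// Mutable references to the two parts of an `IndexMapCore`.
///
/// When using `HashTable::find_entry`, that takes hold of `&mut indices`, so
/// we have to borrow our `&mut entries` separately, and there's no way to go
/// back to a `&mut self`. So this type is used to implement methods on the
/// split references, and `IndexMapCore` can also call those to avoid duplication.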
struct RefMut<'a, K, V> {
    indices: &'a mut Indices,
    entries: &'a mut Entries<K, V>,
}

/// Returns a hasher for the indices table that looks up each entry's cached hash.
#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

/// Returns an equality predicate for the indices table that compares `key`
/// against the entry stored at each candidate index.
#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

/// Removes the table slot that maps `hash` to `index`.
#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

/// Rewrites the table slot that maps `hash` to `old` so it points to `new`.
#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

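/// Inserts many entries into the indices table without reallocating,
/// assigning them consecutive indices starting at the current length.
///
/// ***Panics*** if there is not sufficient capacity already.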
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.borrow_mut().reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> crate::Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    fn borrow_mut(&mut self) -> RefMut<'_, K, V> {
        RefMut::new(&mut self.indices, &mut self.entries)
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

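    /// Reserve capacity for `additional` more key-value pairs.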
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.borrow_mut().reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

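    /// Try to reserve entries capacity, rounded up to match the indices table.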
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound.
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair.
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Return the index in `entries` where an equivalent key can be found.
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

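    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists in `indices`.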
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.borrow_mut().reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key.
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Remove an entry by shifting all entries that follow it.
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().shift_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it.
    #[inline]
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().shift_remove_index(index)
    }

    #[inline]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        self.borrow_mut().move_index(from, to);
    }

    #[inline]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        self.borrow_mut().swap_indices(a, b);
    }

    /// Remove an entry by swapping it with the last.
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().swap_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by swapping it with the last.
    #[inline]
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().swap_remove_index(index)
    }

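    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.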
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies.
        if erased == 0 {
            // Degenerate case, nothing to do.
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices.
            self.indices.clear();

            // Reinsert stable indices, then shifted indices.
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust.

            // Find erased indices.
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices.
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep over all the indices, keeping those below the range
            // and shifting those above the range down.
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, we can easily calculate what they
        // should be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }
}

impl<'a, K, V> RefMut<'a, K, V> {
    #[inline]
    fn new(indices: &'a mut Indices, entries: &'a mut Entries<K, V>) -> Self {
        Self { indices, entries }
    }

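    /// Reserve entries capacity, rounded up to match the indices table.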
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let new_capacity = Ord::min(
            self.indices.capacity(),
            IndexMapCore::<K, V>::MAX_ENTRIES_CAPACITY,
        );
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }

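    /// Insert a key-value pair at the end of `entries`,
    /// *without* checking whether it already exists.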
    fn insert_unique(mut self, hash: HashValue, key: K, value: V) -> OccupiedEntry<'a, K, V> {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        let i = self.indices.len();
        let entry = self
            .indices
            .insert_unique(hash.get(), i, get_hash(self.entries));
        debug_assert_eq!(i, self.entries.len());
        self.entries.push(Bucket { hash, key, value });
        OccupiedEntry::new(self.entries, entry)
    }

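    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.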
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Remove an entry by shifting all entries that follow it.
    fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

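    /// Finish removing an entry by shifting all entries that follow it.
    ///
    /// The index should already be removed from `self.indices`.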
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last.
    fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

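    /// Finish removing an entry by swapping it with the last.
    ///
    /// The index should already be removed from `self.indices`.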
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // Use swap_remove, but then we need to update the index that points
        // to the other entry that had to move.
        let entry = self.entries.swap_remove(index);

        if let Some(entry) = self.entries.get(index) {
            // The removed entry was not the last, so another entry moved into
            // `index`; find its slot in `indices` and correct it.
            let last = self.entries.len();
            update_index(self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

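    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.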
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(self.indices, entry.hash, i, i - 1);
            }
        }
    }

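    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.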
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse
            // order so we shift into any overlapping gap first.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(self.indices, entry.hash, i, i + 1);
            }
        }
    }

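    /// Move an entry from position `from` to position `to`, shifting everything
    /// between them. For example, with entries `[a, b, c, d]`, `move_index(1, 3)`
    /// leaves `[a, c, d, b]`.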
    fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        let _ = self.entries[to]; // explicit bounds check
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(self.indices, from_hash, usize::MAX, to);
        }
    }

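    /// Swap the entries at positions `a` and `b`, fixing both index slots.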
    fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // The `entries[a]` and `entries[b]` lookups also serve as bounds checks;
        // `get_many_mut` then gives us both index slots at once.
        match self.indices.get_many_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}