priority_queue/
store.rs

/*
 *  Copyright 2017 Gianmarco Garrisi
 *
 *
 *  This program is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version, or (at your option) under the terms
 *  of the Mozilla Public License version 2.0.
 *
 *  ----
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  ----
 *
 *  This Source Code Form is subject to the terms of the Mozilla Public License,
 *  v. 2.0. If a copy of the MPL was not distributed with this file, You can
 *  obtain one at http://mozilla.org/MPL/2.0/.
 *
 */
28#[cfg(not(feature = "std"))]
29use std::vec::Vec;
30
31// an improvement in terms of complexity would be to use a bare HashMap
32// as vec instead of the IndexMap
33use crate::core_iterators::*;
34use crate::TryReserveError;
35
36use std::borrow::Borrow;
37use std::cmp::{Eq, Ord};
38#[cfg(feature = "std")]
39use std::collections::hash_map::RandomState;
40use std::hash::{BuildHasher, Hash};
41use std::iter::{FromIterator, IntoIterator, Iterator};
42use std::mem::swap;
43
44use indexmap::map::{IndexMap, MutableKeys};
45
/// The Index of the element in the Map
///
/// Newtype over `usize` so that map indexes and heap positions cannot be
/// accidentally mixed up at the type level.
#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub(crate) struct Index(pub usize);
/// The Position of the element in the Heap
///
/// Newtype over `usize`, distinct from [`Index`], so a heap slot can never
/// be passed where a map index is expected (and vice versa).
#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub(crate) struct Position(pub usize);
52
/// Internal storage of PriorityQueue and DoublePriorityQueue
///
/// `heap` and `qp` are kept as inverse permutations of one another:
/// `heap[qp[i].0] == Index(i)` and `qp[heap[p].0] == Position(p)`
/// (see `Store::swap`, which updates both sides in lock-step).
#[derive(Clone)]
#[cfg(feature = "std")]
pub(crate) struct Store<I, P, H = RandomState> {
    pub map: IndexMap<I, P, H>, // Stores the items and assign them an index
    pub heap: Vec<Index>,       // Implements the heap of indexes
    pub qp: Vec<Position>,      // Performs the translation from the index
    // of the map to the index of the heap
    pub size: usize, // The size of the heap
}
63
// no_std variant: identical layout, but without the `RandomState` default
// for the hasher parameter (RandomState lives in std).
#[derive(Clone)]
#[cfg(not(feature = "std"))]
pub(crate) struct Store<I, P, H> {
    pub map: IndexMap<I, P, H>, // Stores the items and assign them an index
    pub heap: Vec<Index>,       // Implements the heap of indexes
    pub qp: Vec<Position>,      // Performs the translation from the index
    // of the map to the index of the heap
    pub size: usize, // The size of the heap
}
73
// do not [derive(Eq)] to loosen up trait requirements for other types and impls
// (a derived impl would also demand `H: Eq`, which hashers don't implement;
// equality is delegated to PartialEq, which only compares the maps).
impl<I, P, H> Eq for Store<I, P, H>
where
    I: Hash + Eq,
    P: Ord,
    H: BuildHasher,
{
}
82
83impl<I, P, H> Default for Store<I, P, H>
84where
85    I: Hash + Eq,
86    P: Ord,
87    H: BuildHasher + Default,
88{
89    fn default() -> Self {
90        Self::with_default_hasher()
91    }
92}
93
94impl<I, P, H> Store<I, P, H>
95where
96    P: Ord,
97    I: Hash + Eq,
98    H: BuildHasher + Default,
99{
100    /// Creates an empty `Store` with the default hasher
101    pub fn with_default_hasher() -> Self {
102        Self::with_capacity_and_default_hasher(0)
103    }
104
105    /// Creates an empty `Store` with the specified capacity and default hasher
106    pub fn with_capacity_and_default_hasher(capacity: usize) -> Self {
107        Self::with_capacity_and_hasher(capacity, H::default())
108    }
109}
110
111impl<I, P, H> Store<I, P, H>
112where
113    P: Ord,
114    I: Hash + Eq,
115    H: BuildHasher,
116{
117    /// Creates an empty `Store` with the specified hasher
118    pub fn with_hasher(hash_builder: H) -> Self {
119        Self::with_capacity_and_hasher(0, hash_builder)
120    }
121
122    /// Creates an empty `Store` with the specified capacity and hasher
123    ///
124    /// The internal collections will be able to hold at least `capacity`
125    /// elements without reallocating.
126    /// If `capacity` is 0, there will be no allocation.
127    pub fn with_capacity_and_hasher(capacity: usize, hash_builder: H) -> Self {
128        Self {
129            map: IndexMap::with_capacity_and_hasher(capacity, hash_builder),
130            heap: Vec::with_capacity(capacity),
131            qp: Vec::with_capacity(capacity),
132            size: 0,
133        }
134    }
135}
136
impl<I, P, H> Store<I, P, H> {
    /// Returns an iterator in arbitrary order over the
    /// (item, priority) elements in the queue
    pub fn iter(&self) -> Iter<I, P> {
        Iter {
            iter: self.map.iter(),
        }
    }

    /// Shrinks the capacity of the internal data structures
    /// that support this operation as much as possible.
    #[inline(always)]
    pub fn shrink_to_fit(&mut self) {
        self.heap.shrink_to_fit();
        self.qp.shrink_to_fit();
        self.map.shrink_to_fit();
    }

    /// Reserves capacity for at least `additional` more elements to be inserted
    /// in the given `PriorityQueue`. The collection may reserve more space to avoid
    /// frequent reallocations. After calling `reserve`, capacity will be
    /// greater than or equal to `self.len() + additional`. Does nothing if
    /// capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// Computes in O(n) time.
    pub fn reserve(&mut self, additional: usize) {
        self.map.reserve(additional);
        self.heap.reserve(additional);
        self.qp.reserve(additional);
    }

    /// Reserve capacity for `additional` more elements, without over-allocating.
    ///
    /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
    /// frequent re-allocations. However, the underlying data structures may still have internal
    /// capacity requirements, and the allocator itself may give more space than requested, so this
    /// cannot be relied upon to be precisely minimal.
    ///
    /// Computes in **O(n)** time.
    pub fn reserve_exact(&mut self, additional: usize) {
        self.map.reserve_exact(additional);
        self.heap.reserve_exact(additional);
        self.qp.reserve_exact(additional);
    }

    /// Try to reserve capacity for at least `additional` more elements.
    ///
    /// Returns at the first failing reservation; a partial reserve of some
    /// of the three collections may have happened by then.
    ///
    /// Computes in O(n) time.
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.map.try_reserve(additional)?;
        self.heap.try_reserve(additional)?;
        self.qp.try_reserve(additional)?;
        Ok(())
    }

    /// Try to reserve capacity for `additional` more elements, without over-allocating.
    ///
    /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
    /// frequent re-allocations. However, the underlying data structures may still have internal
    /// capacity requirements, and the allocator itself may give more space than requested, so this
    /// cannot be relied upon to be precisely minimal.
    ///
    /// Computes in **O(n)** time.
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.map.try_reserve_exact(additional)?;
        self.heap.try_reserve_exact(additional)?;
        self.qp.try_reserve_exact(additional)?;
        Ok(())
    }

    /// Clears the store, returning an iterator over the removed elements in arbitrary order.
    /// If the iterator is dropped before being fully consumed, it drops the remaining elements in arbitrary order.
    ///
    /// heap/qp/size are reset eagerly; only the map is drained lazily
    /// through the returned iterator.
    pub fn drain(&mut self) -> Drain<'_, I, P> {
        self.heap.clear();
        self.qp.clear();
        self.size = 0;

        Drain {
            iter: self.map.drain(..),
        }
    }

    /// Returns the number of elements the internal map can hold without
    /// reallocating.
    ///
    /// This number is a lower bound; the map might be able to hold more,
    /// but is guaranteed to be able to hold at least this many.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        self.map.capacity()
    }

    /// Returns the number of elements in the priority queue.
    ///
    /// Reads the cached `size` counter; it is kept in sync with the
    /// collections by every mutating method of this type.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.size
    }

    /// Returns true if the priority queue contains no elements.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Returns the items not ordered
    pub fn into_vec(self) -> Vec<I> {
        self.map.into_iter().map(|(i, _)| i).collect()
    }

    /// Drops all items from the priority queue
    pub fn clear(&mut self) {
        self.heap.clear();
        self.qp.clear();
        self.map.clear();
        self.size = 0;
    }

    /// Swap two elements keeping a consistent state.
    ///
    /// Updates `heap` and `qp` together so that the invariant
    /// `qp[heap[p].0] == p` is preserved on both sides of the swap.
    ///
    /// NOTE(review): `a.0` and `b.0` are read with `get_unchecked`, so the
    /// caller must pass positions `< self.heap.len()` — this is not checked.
    ///
    /// Computes in **O(1)** time
    #[inline(always)]
    pub fn swap(&mut self, a: Position, b: Position) {
        self.qp.swap(
            unsafe { self.heap.get_unchecked(a.0) }.0,
            unsafe { self.heap.get_unchecked(b.0) }.0,
        );
        self.heap.swap(a.0, b.0);
    }

    /// Remove and return the element at index `idx`
    /// and swap it with the last element keeping a consistent
    /// state.
    ///
    /// `swap_remove` on `qp` (and on the map) moves the *last* entry into
    /// slot `idx`; the two `if` blocks below repair the cross-references
    /// (`heap` ↔ `qp`) of the entries that got moved.
    ///
    /// Computes in **O(1)** time (average)
    pub fn swap_remove_index(&mut self, idx: Index) -> Option<(I, P)> {
        // swap_remove the position from the qp
        let position = self.qp.swap_remove(idx.0);
        self.size -= 1;

        if idx.0 < self.size {
            // SAFETY: head validity checked on the previous line.
            // All positions point to valid heap items because we already
            // updated the qp.
            unsafe {
                *self.heap.get_unchecked_mut(self.qp.get_unchecked(idx.0).0) = idx;
            }
        }
        self.heap.swap_remove(position.0);
        // Fix indexes and swap remove the old heap head from the qp
        if position.0 < self.size {
            // SAFETY: position validity checked on the previous line.
            // Indexes still point to valid qp items because we didn't
            // remove anything from qp yet
            unsafe {
                *self
                    .qp
                    .get_unchecked_mut(self.heap.get_unchecked(position.0).0) = position;
            }
        }

        // swap remove from the map and return to the client
        self.map.swap_remove_index(idx.0)
    }

    /// Remove and return the element in position `position`
    /// and swap it with the last element keeping a consistent
    /// state.
    ///
    /// Mirror image of [`Self::swap_remove_index`]: here the removal starts
    /// from the heap side, and the moved entries' cross-references are
    /// repaired afterwards.
    ///
    /// Computes in **O(1)** time (average)
    pub fn swap_remove(&mut self, position: Position) -> Option<(I, P)> {
        // swap_remove the head
        let head: Index = self.heap.swap_remove(position.0);
        self.size -= 1;

        // Fix indexes and swap remove the old heap head from the qp
        if position.0 < self.size {
            // SAFETY: position validity checked on the previous line.
            // Indexes still point to valid qp items because we didn't
            // remove anything from qp yet
            unsafe {
                *self
                    .qp
                    .get_unchecked_mut(self.heap.get_unchecked(position.0).0) = position;
            }
        }
        self.qp.swap_remove(head.0);
        if head.0 < self.size {
            // SAFETY: head validity checked on the previous line.
            // All positions point to valid heap items because we already
            // updated the qp.
            unsafe {
                *self.heap.get_unchecked_mut(self.qp.get_unchecked(head.0).0) = head;
            }
        }
        // swap remove from the map and return to the client
        self.map.swap_remove_index(head.0)
    }

    /// Returns a reference to the priority of the element stored at heap
    /// slot `position`.
    ///
    /// # Safety
    ///
    /// `position.0` must be in bounds for `self.heap` (i.e. strictly less
    /// than `self.len()`); the heap slot is read with `get_unchecked` and an
    /// out-of-bounds position is undefined behavior. The `.unwrap()` on the
    /// map lookup relies on the heap/map invariant that every heap entry
    /// holds a valid map index.
    #[inline(always)]
    pub unsafe fn get_priority_from_position(&self, position: Position) -> &P {
        unsafe {
            self.map
                .get_index(self.heap.get_unchecked(position.0).0)
                .unwrap()
                .1
        }
    }
}
349
impl<I, P, H> Store<I, P, H>
where
    P: Ord,
    I: Hash + Eq,
    H: BuildHasher,
{
    /// Keeps only the (item, priority) couples for which `predicate`
    /// returns `true`, dropping the others.
    pub fn retain<F>(&mut self, mut predicate: F)
    where
        F: FnMut(&I, &P) -> bool,
    {
        self.retain_mut(|i, p| predicate(&*i, &*p));
    }

    /// Like [`Self::retain`], but the predicate receives mutable references.
    ///
    /// NOTE(review): when anything is removed, `heap` and `qp` are rebuilt
    /// as the identity permutation, so the heap-ordering property is NOT
    /// restored here — presumably the calling queue re-heapifies afterwards;
    /// confirm against the callers.
    pub fn retain_mut<F>(&mut self, predicate: F)
    where
        F: FnMut(&mut I, &mut P) -> bool,
    {
        self.map.retain2(predicate);
        if self.map.len() != self.size {
            self.size = self.map.len();
            self.heap = (0..self.size).map(Index).collect();
            self.qp = (0..self.size).map(Position).collect();
        }
    }

    /// If the predicate returns true for the element in position `position`,
    /// remove it and swap it with the last element keeping a consistent
    /// state.
    ///
    /// NOTE(review): `position.0` is read with `get_unchecked` without a
    /// bounds check — callers must guarantee the position is valid.
    ///
    /// Computes in **O(1)** time (average)
    pub fn swap_remove_if<F>(&mut self, position: Position, f: F) -> Option<(I, P)>
    where
        F: FnOnce(&mut I, &mut P) -> bool,
    {
        let head: Index = unsafe { *self.heap.get_unchecked(position.0) };
        let (i, p) = self.map.get_index_mut2(head.0).unwrap();

        if f(i, p) {
            self.swap_remove(position)
        } else {
            None
        }
    }

    /// Change the priority of an Item returning the old value of priority,
    /// or `None` if the item wasn't in the queue.
    ///
    /// The argument `item` is only used for lookup, and is not used to overwrite the item's data
    /// in the priority queue.
    ///
    /// Also returns the heap `Position` of the item, so the caller can
    /// restore the heap property from there.
    ///
    /// The item is found in **O(1)** thanks to the hash table.
    /// The operation is performed in **O(log(N))** time.
    pub fn change_priority<Q>(&mut self, item: &Q, mut new_priority: P) -> Option<(P, Position)>
    where
        I: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        // Destructure self so `map` and `qp` can be borrowed independently
        // inside the closure.
        let Store { map, qp, .. } = self;
        map.get_full_mut(item).map(|(index, _, p)| {
            // After the swap, `new_priority` holds the OLD priority.
            swap(p, &mut new_priority);
            let pos = unsafe { *qp.get_unchecked(index) };
            (new_priority, pos)
        })
    }

    /// Change the priority of an Item using the provided function.
    /// The item is found in **O(1)** thanks to the hash table.
    /// The operation is performed in **O(log(N))** time (worst case).
    pub fn change_priority_by<Q, F>(&mut self, item: &Q, priority_setter: F) -> Option<Position>
    where
        I: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
        F: FnOnce(&mut P),
    {
        let Store { map, qp, .. } = self;
        map.get_full_mut(item).map(|(index, _, p)| {
            priority_setter(p);
            unsafe { *qp.get_unchecked(index) }
        })
    }

    /// Get the priority of an item, or `None`, if the item is not in the queue
    pub fn get_priority<Q>(&self, item: &Q) -> Option<&P>
    where
        I: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        self.map.get(item)
    }

    /// Check if the store contains `item`.
    ///
    /// Returns `true` if `item` is in the store, `false` if it is not.
    pub fn contains<Q>(&self, item: &Q) -> bool
    where
        I: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        self.map.contains_key(item)
    }

    /// Get the couple (item, priority) of an arbitrary element, as reference
    /// or `None` if the item is not in the queue.
    pub fn get<Q>(&self, item: &Q) -> Option<(&I, &P)>
    where
        I: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        self.map.get_full(item).map(|(_, k, v)| (k, v))
    }

    /// Get the couple (item, priority) of an arbitrary element, or `None`
    /// if the item was not in the queue.
    ///
    /// The item is a mutable reference, but it's a logic error to modify it
    /// in a way that change the result of  `Hash` or `Eq`.
    ///
    /// The priority cannot be modified with a call to this function.
    /// To modify the priority use `push`, `change_priority` or
    /// `change_priority_by`.
    pub fn get_mut<Q>(&mut self, item: &Q) -> Option<(&mut I, &P)>
    where
        I: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        self.map.get_full_mut2(item).map(|(_, k, v)| (k, &*v))
    }

    /// Remove `item` from the store, returning the item, its priority and
    /// the heap position it occupied, or `None` if it was not present.
    ///
    /// The three collections are updated with `swap_remove`, so the last
    /// entry of each moves into the vacated slot; the two `if` blocks
    /// repair the cross-references, with the `== self.size` special cases
    /// handling the situation where a moved entry refers to the slot that
    /// was itself just vacated.
    ///
    /// NOTE(review): the heap-ordering property is not restored here; the
    /// returned `Position` presumably lets the caller sift — confirm.
    pub fn remove<Q>(&mut self, item: &Q) -> Option<(I, P, Position)>
    where
        I: Borrow<Q>,
        Q: Eq + Hash + ?Sized,
    {
        self.map.swap_remove_full(item).map(|(i, item, priority)| {
            let i = Index(i);
            self.size -= 1;

            let pos: Position = self.qp.swap_remove(i.0);
            self.heap.swap_remove(pos.0);
            if i.0 < self.size {
                unsafe {
                    let qpi = self.qp.get_unchecked_mut(i.0);
                    if qpi.0 == self.size {
                        *qpi = pos;
                    } else {
                        *self.heap.get_unchecked_mut(qpi.0) = i;
                    }
                }
            }
            if pos.0 < self.size {
                unsafe {
                    let heap_pos = self.heap.get_unchecked_mut(pos.0);
                    if heap_pos.0 == self.size {
                        *heap_pos = i;
                    } else {
                        *self.qp.get_unchecked_mut(heap_pos.0) = pos;
                    }
                }
            }
            (item, priority, pos)
        })
    }

    /// Move all items of the `other` queue to `self`
    /// ignoring the items Eq to elements already in `self`
    /// At the end, `other` will be empty.
    ///
    /// **Note** that at the end, the priority of the duplicated elements
    /// inside self may be the one of the elements in other,
    /// if other is longer than self
    pub fn append(&mut self, other: &mut Self) {
        // Always drain the shorter store into the longer one.
        if other.size > self.size {
            std::mem::swap(self, other);
        }
        if other.size == 0 {
            return;
        }
        // what should we do for duplicated keys?
        // ignore
        for (k, v) in other.drain() {
            if !self.map.contains_key(&k) {
                let i = self.size;
                self.map.insert(k, v);
                self.heap.push(Index(i));
                self.qp.push(Position(i));
                self.size += 1;
            }
        }
    }
}
540
541impl<I, P, H> IntoIterator for Store<I, P, H> {
542    type Item = (I, P);
543    type IntoIter = IntoIter<I, P>;
544    fn into_iter(self) -> IntoIter<I, P> {
545        IntoIter {
546            iter: self.map.into_iter(),
547        }
548    }
549}
550
551impl<'a, I, P, H> IntoIterator for &'a Store<I, P, H> {
552    type Item = (&'a I, &'a P);
553    type IntoIter = Iter<'a, I, P>;
554    fn into_iter(self) -> Iter<'a, I, P> {
555        Iter {
556            iter: self.map.iter(),
557        }
558    }
559}
560
561use std::cmp::PartialEq;
562
563impl<I, P1, H1, P2, H2> PartialEq<Store<I, P2, H2>> for Store<I, P1, H1>
564where
565    I: Hash + Eq,
566    P1: Ord,
567    P1: PartialEq<P2>,
568    Option<P1>: PartialEq<Option<P2>>,
569    P2: Ord,
570    H1: BuildHasher,
571    H2: BuildHasher,
572{
573    fn eq(&self, other: &Store<I, P2, H2>) -> bool {
574        self.map == other.map
575    }
576}
577
578impl<I, P, H> From<Vec<(I, P)>> for Store<I, P, H>
579where
580    I: Hash + Eq,
581    P: Ord,
582    H: BuildHasher + Default,
583{
584    fn from(vec: Vec<(I, P)>) -> Self {
585        let mut store = Self::with_capacity_and_hasher(vec.len(), <_>::default());
586        let mut i = 0;
587        for (item, priority) in vec {
588            if !store.map.contains_key(&item) {
589                store.map.insert(item, priority);
590                store.qp.push(Position(i));
591                store.heap.push(Index(i));
592                i += 1;
593            }
594        }
595        store.size = i;
596        store
597    }
598}
599
600impl<I, P, H> FromIterator<(I, P)> for Store<I, P, H>
601where
602    I: Hash + Eq,
603    P: Ord,
604    H: BuildHasher + Default,
605{
606    fn from_iter<IT>(iter: IT) -> Self
607    where
608        IT: IntoIterator<Item = (I, P)>,
609    {
610        let iter = iter.into_iter();
611        let (min, max) = iter.size_hint();
612        let mut store = if let Some(max) = max {
613            Self::with_capacity_and_hasher(max, <_>::default())
614        } else if min > 0 {
615            Self::with_capacity_and_hasher(min, <_>::default())
616        } else {
617            Self::with_hasher(<_>::default())
618        };
619        for (item, priority) in iter {
620            if store.map.contains_key(&item) {
621                let (_, old_item, old_priority) = store.map.get_full_mut2(&item).unwrap();
622                *old_item = item;
623                *old_priority = priority;
624            } else {
625                store.map.insert(item, priority);
626                store.qp.push(Position(store.size));
627                store.heap.push(Index(store.size));
628                store.size += 1;
629            }
630        }
631        store
632    }
633}
634
635impl<I, P, H> Extend<(I, P)> for Store<I, P, H>
636where
637    I: Hash + Eq,
638    P: Ord,
639    H: BuildHasher,
640{
641    fn extend<T: IntoIterator<Item = (I, P)>>(&mut self, iter: T) {
642        for (item, priority) in iter {
643            if self.map.contains_key(&item) {
644                let (_, old_item, old_priority) = self.map.get_full_mut2(&item).unwrap();
645                *old_item = item;
646                *old_priority = priority;
647            } else {
648                self.map.insert(item, priority);
649                self.qp.push(Position(self.size));
650                self.heap.push(Index(self.size));
651                self.size += 1;
652            }
653        }
654    }
655}
656
657use std::fmt;
658impl<I, P, H> fmt::Debug for Store<I, P, H>
659where
660    I: fmt::Debug,
661    P: fmt::Debug,
662{
663    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
664        f.debug_map()
665            .entries(
666                self.heap
667                    .iter()
668                    .map(|&i| (i, self.map.get_index(i.0).unwrap())),
669            )
670            .finish()
671    }
672}
673
#[cfg(feature = "serde")]
mod serde {
    //! `serde` support for `Store`.
    //!
    //! A store is serialized as a sequence of `(item, priority)` couples,
    //! emitted in the internal map's order — not in heap order.
    use crate::store::{Index, Position, Store};

    use std::cmp::{Eq, Ord};
    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hash};
    use std::marker::PhantomData;

    use serde::ser::{Serialize, SerializeSeq, Serializer};

    impl<I, P, H> Serialize for Store<I, P, H>
    where
        I: Serialize,
        P: Serialize,
    {
        /// Serializes the store as a sequence of `(item, priority)` couples.
        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // `self.size` is used as the length hint; it matches the number
            // of map entries as long as the store invariants hold.
            let mut map_serializer = serializer.serialize_seq(Some(self.size))?;
            for (k, v) in &self.map {
                map_serializer.serialize_element(&(k, v))?;
            }
            map_serializer.end()
        }
    }

    use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
    impl<'de, I, P, H> Deserialize<'de> for Store<I, P, H>
    where
        I: Hash + Eq + Deserialize<'de>,
        P: Ord + Deserialize<'de>,
        H: BuildHasher + Default,
    {
        /// Deserializes a store from a sequence of `(item, priority)` couples.
        fn deserialize<D>(deserializer: D) -> Result<Store<I, P, H>, D::Error>
        where
            D: Deserializer<'de>,
        {
            deserializer.deserialize_seq(StoreVisitor {
                marker: PhantomData,
            })
        }
    }

    /// Visitor driving the sequence-based deserialization of a `Store`.
    struct StoreVisitor<I, P, H = RandomState>
    where
        I: Hash + Eq,
        P: Ord,
    {
        // Zero-sized: only carries the target type for the Visitor impl.
        marker: PhantomData<Store<I, P, H>>,
    }
    impl<'de, I, P, H> Visitor<'de> for StoreVisitor<I, P, H>
    where
        I: Hash + Eq + Deserialize<'de>,
        P: Ord + Deserialize<'de>,
        H: BuildHasher + Default,
    {
        type Value = Store<I, P, H>;

        fn expecting(&self, formatter: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
            write!(formatter, "A priority queue")
        }

        /// A unit value deserializes to an empty store.
        fn visit_unit<E>(self) -> Result<Self::Value, E> {
            Ok(Store::with_default_hasher())
        }

        fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
        where
            A: SeqAccess<'de>,
        {
            let mut store: Store<I, P, H> = if let Some(size) = seq.size_hint() {
                Store::with_capacity_and_default_hasher(size)
            } else {
                Store::with_default_hasher()
            };

            // NOTE(review): heap/qp are filled as the identity permutation,
            // so the heap-ordering property is NOT established here —
            // presumably the queue's own Deserialize re-heapifies; confirm.
            while let Some((item, priority)) = seq.next_element()? {
                store.map.insert(item, priority);
                store.qp.push(Position(store.size));
                store.heap.push(Index(store.size));
                store.size += 1;
            }
            Ok(store)
        }
    }
}