sharded_slab/page/mod.rs

use crate::cfg::{self, CfgPrivate};
use crate::clear::Clear;
use crate::sync::UnsafeCell;
use crate::Pack;

pub(crate) mod slot;
mod stack;
pub(crate) use self::slot::Slot;
use std::{fmt, marker::PhantomData};

/// A page address encodes the location of a slot within a shard (the page
/// number and offset within that page) as a single linear value.
#[repr(transparent)]
pub(crate) struct Addr<C: cfg::Config = cfg::DefaultConfig> {
    addr: usize,
    _cfg: PhantomData<fn(C)>,
}

impl<C: cfg::Config> Addr<C> {
    const NULL: usize = Self::BITS + 1;

    pub(crate) fn index(self) -> usize {
        // Since every page is twice as large as the previous page, and all page sizes
        // are powers of two, we can determine the page index that contains a given
        // address by counting leading zeros, which tells us what power of two
        // the offset fits into.
        //
        // First, we must shift down to the smallest page size, so that the last
        // offset on the first page becomes 0.
        let shifted = (self.addr + C::INITIAL_SZ) >> C::ADDR_INDEX_SHIFT;
        // Now, we can determine the number of twos places by counting the
        // number of leading zeros (unused twos places) in the number's binary
        // representation, and subtracting that count from the total number of bits in a word.
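        //
        // For example (a sketch assuming the default initial page size of 32,
        // which makes `ADDR_INDEX_SHIFT` 6): addresses 0..32 shift down to 0,
        // whose leading-zero count is the full word width, giving page index 0;
        // addresses 32..96 shift down to 1, giving index 1; addresses 96..224
        // shift down to 2 or 3, giving index 2; and so on.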
        cfg::WIDTH - shifted.leading_zeros() as usize
    }

    pub(crate) fn offset(self) -> usize {
        self.addr
    }
}

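/// A list of free slots onto which a freed slot can be pushed, either by the
/// thread that owns the page (`Local`) or by a remote thread
/// (`stack::TransferStack`).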
pub(crate) trait FreeList<C> {
    fn push<T>(&self, new_head: usize, slot: &Slot<T, C>)
    where
        C: cfg::Config;
}

impl<C: cfg::Config> Pack<C> for Addr<C> {
    const LEN: usize = C::MAX_PAGES + C::ADDR_INDEX_SHIFT;

    type Prev = ();

    fn as_usize(&self) -> usize {
        self.addr
    }

    fn from_usize(addr: usize) -> Self {
        debug_assert!(addr <= Self::BITS);
        Self {
            addr,
            _cfg: PhantomData,
        }
    }
}

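// A note on the shape of this alias: the mapping function is a plain `fn`
// pointer rather than an unnameable closure type so that the iterator type can
// be written out here; `Shared::make_ref`, below, is what `iter` actually
// passes to `filter_map`.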
pub(crate) type Iter<'a, T, C> = std::iter::FilterMap<
    std::slice::Iter<'a, Slot<Option<T>, C>>,
    fn(&'a Slot<Option<T>, C>) -> Option<&'a T>,
>;

pub(crate) struct Local {
    /// Index of the first slot on the local free list
    head: UnsafeCell<usize>,
}

pub(crate) struct Shared<T, C> {
    /// The remote free list
    ///
    /// Slots freed from a remote thread are pushed onto this list.
    remote: stack::TransferStack<C>,
    // Total size of the page.
    //
    // If the head index of the local or remote free list is greater than the size of the
    // page, then that free list is empty. If the heads of both free lists are greater
    // than `size`, then there are no free slots left in the page.
    size: usize,
    prev_sz: usize,
    slab: UnsafeCell<Option<Slots<T, C>>>,
}

type Slots<T, C> = Box<[Slot<T, C>]>;

impl Local {
    pub(crate) fn new() -> Self {
        Self {
            head: UnsafeCell::new(0),
        }
    }

    #[inline(always)]
    fn head(&self) -> usize {
        self.head.with(|head| unsafe { *head })
    }

    #[inline(always)]
    fn set_head(&self, new_head: usize) {
        self.head.with_mut(|head| unsafe {
            *head = new_head;
        })
    }
}

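// Note that pushing to the local free list involves no synchronization: the
// `head` cell is read and written with plain loads and stores, on the
// assumption (upheld elsewhere in this crate) that a `Local` is only ever
// accessed from the thread that owns its shard.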
impl<C: cfg::Config> FreeList<C> for Local {
    fn push<T>(&self, new_head: usize, slot: &Slot<T, C>) {
        slot.set_next(self.head());
        self.set_head(new_head);
    }
}

impl<T, C> Shared<T, C>
where
    C: cfg::Config,
{
    const NULL: usize = Addr::<C>::NULL;

    pub(crate) fn new(size: usize, prev_sz: usize) -> Self {
        Self {
            prev_sz,
            size,
            remote: stack::TransferStack::new(),
            slab: UnsafeCell::new(None),
        }
    }

    /// Returns the head of the free list.
    ///
    /// If the local free list is non-empty, this returns the head of the local list.
    /// Otherwise, it pops all the slots from the remote free list and returns the head
    /// of that list.
    ///
    /// *Note*: The local list's head is reset when setting the new state in the slot
    /// pointed to by the `head` returned from this function.
    #[inline]
    fn pop(&self, local: &Local) -> Option<usize> {
        let head = local.head();

        test_println!("-> local head {:?}", head);

        // are there any items on the local free list? (fast path)
        let head = if head < self.size {
            head
        } else {
            // slow path: if the local free list is empty, pop all the items on
            // the remote free list.
            let head = self.remote.pop_all();

            test_println!("-> remote head {:?}", head);
            head?
        };

        // if the head is still null, both the local and remote free lists are
        // empty --- we can't fit any more items on this page.
        if head == Self::NULL {
            test_println!("-> NULL! {:?}", head);
            None
        } else {
            Some(head)
        }
    }

    /// Returns `true` if storage has not yet been allocated for this page,
    /// `false` otherwise.
    #[inline]
    fn is_unallocated(&self) -> bool {
        self.slab.with(|s| unsafe { (*s).is_none() })
    }

    #[inline]
    pub(crate) fn with_slot<'a, U>(
        &'a self,
        addr: Addr<C>,
        f: impl FnOnce(&'a Slot<T, C>) -> Option<U>,
    ) -> Option<U> {
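        // Addresses are linear across all of a shard's pages, so subtracting
        // the combined size of the previous pages converts `addr` into an
        // offset local to this page.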
        let poff = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", poff);

        self.slab.with(|slab| {
            let slot = unsafe { &*slab }.as_ref()?.get(poff)?;
            f(slot)
        })
    }

    #[inline(always)]
    pub(crate) fn free_list(&self) -> &impl FreeList<C> {
        &self.remote
    }
}

impl<'a, T, C> Shared<Option<T>, C>
where
    C: cfg::Config + 'a,
{
    pub(crate) fn take<F>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> Option<T>
    where
        F: FreeList<C>,
    {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> take: offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref()?;
            let slot = slab.get(offset)?;
            slot.remove_value(gen, offset, free_list)
        })
    }

    pub(crate) fn remove<F: FreeList<C>>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> bool {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();
            if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot.try_remove_value(gen, offset, free_list)
            } else {
                false
            }
        })
    }

    // This must be a named function rather than a closure, because `iter` needs a
    // function pointer to pass to `filter_map`. `Slot::value` returns a `&T` (here,
    // a `&Option<T>`), which this converts into an `Option<&T>`.
    fn make_ref(slot: &'a Slot<Option<T>, C>) -> Option<&'a T> {
        slot.value().as_ref()
    }

    pub(crate) fn iter(&self) -> Option<Iter<'a, T, C>> {
        let slab = self.slab.with(|slab| unsafe { (*slab).as_ref() });
        slab.map(|slab| {
            slab.iter()
                .filter_map(Shared::make_ref as fn(&'a Slot<Option<T>, C>) -> Option<&'a T>)
        })
    }
}

impl<T, C> Shared<T, C>
where
    T: Clear + Default,
    C: cfg::Config,
{
    pub(crate) fn init_with<U>(
        &self,
        local: &Local,
        init: impl FnOnce(usize, &Slot<T, C>) -> Option<U>,
    ) -> Option<U> {
        let head = self.pop(local)?;

        // do we need to allocate storage for this page?
        if self.is_unallocated() {
            self.allocate();
        }

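        // `head` is an offset local to this page; adding the combined size of
        // the previous pages turns it back into a shard-linear index.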
        let index = head + self.prev_sz;

        let result = self.slab.with(|slab| {
            let slab = unsafe { &*slab }
                .as_ref()
                .expect("page must have been allocated to insert!");
            let slot = &slab[head];
            let result = init(index, slot)?;
            local.set_head(slot.next());
            Some(result)
        })?;

        test_println!("-> init_with: insert at offset: {}", index);
        Some(result)
    }

    /// Allocates storage for the page's slots.
    #[cold]
    fn allocate(&self) {
        test_println!("-> alloc new page ({})", self.size);
        debug_assert!(self.is_unallocated());

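        // Build the page's free list as the slots are created: slot `i` starts
        // with its `next` pointer set to `i + 1`, and the final slot's `next`
        // is `NULL`, so a freshly allocated page is a single linked list of
        // free slots beginning at index 0.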
        let mut slab = Vec::with_capacity(self.size);
        slab.extend((1..self.size).map(Slot::new));
        slab.push(Slot::new(Self::NULL));
        self.slab.with_mut(|s| {
            // safety: this mut access is safe --- it only occurs to initially allocate the page,
            // which only happens on this thread; if the page has not yet been allocated, other
            // threads will not try to access it yet.
            unsafe {
                *s = Some(slab.into_boxed_slice());
            }
        });
    }

    pub(crate) fn mark_clear<F: FreeList<C>>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> bool {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();
            if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot.try_clear_storage(gen, offset, free_list)
            } else {
                false
            }
        })
    }

    pub(crate) fn clear<F: FreeList<C>>(
        &self,
        addr: Addr<C>,
        gen: slot::Generation<C>,
        free_list: &F,
    ) -> bool {
        let offset = addr.offset() - self.prev_sz;

        test_println!("-> offset {:?}", offset);

        self.slab.with(|slab| {
            let slab = unsafe { &*slab }.as_ref();
            if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
                slot.clear_storage(gen, offset, free_list)
            } else {
                false
            }
        })
    }
}

impl fmt::Debug for Local {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.head.with(|head| {
            let head = unsafe { *head };
            f.debug_struct("Local")
                .field("head", &format_args!("{:#0x}", head))
                .finish()
        })
    }
}

impl<T, C> fmt::Debug for Shared<T, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Shared")
            .field("remote", &self.remote)
            .field("prev_sz", &self.prev_sz)
            .field("size", &self.size)
            // .field("slab", &self.slab)
            .finish()
    }
}

impl<C: cfg::Config> fmt::Debug for Addr<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Addr")
            .field("addr", &format_args!("{:#0x}", &self.addr))
            .field("index", &self.index())
            .field("offset", &self.offset())
            .finish()
    }
}

impl<C: cfg::Config> PartialEq for Addr<C> {
    fn eq(&self, other: &Self) -> bool {
        self.addr == other.addr
    }
}

impl<C: cfg::Config> Eq for Addr<C> {}

impl<C: cfg::Config> PartialOrd for Addr<C> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.addr.partial_cmp(&other.addr)
    }
}

impl<C: cfg::Config> Ord for Addr<C> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.addr.cmp(&other.addr)
    }
}

impl<C: cfg::Config> Clone for Addr<C> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<C: cfg::Config> Copy for Addr<C> {}

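/// Unpacks a packed index into its `Addr`, returning the address together with
/// the index of the page that contains it.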
#[inline(always)]
pub(crate) fn indices<C: cfg::Config>(idx: usize) -> (Addr<C>, usize) {
    let addr = C::unpack_addr(idx);
    (addr, addr.index())
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::Pack;
    use proptest::prelude::*;

    proptest! {
        #[test]
        fn addr_roundtrips(pidx in 0usize..Addr::<cfg::DefaultConfig>::BITS) {
            let addr = Addr::<cfg::DefaultConfig>::from_usize(pidx);
            let packed = addr.pack(0);
            assert_eq!(addr, Addr::from_packed(packed));
        }

        #[test]
        fn gen_roundtrips(gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS) {
            let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
            let packed = gen.pack(0);
            assert_eq!(gen, slot::Generation::from_packed(packed));
        }

        #[test]
        fn page_roundtrips(
            gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS,
            addr in 0usize..Addr::<cfg::DefaultConfig>::BITS,
        ) {
            let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
            let addr = Addr::<cfg::DefaultConfig>::from_usize(addr);
            let packed = gen.pack(addr.pack(0));
            assert_eq!(addr, Addr::from_packed(packed));
            assert_eq!(gen, slot::Generation::from_packed(packed));
        }
    }
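
    // A non-property sanity check of `Addr::index`. This is a sketch that
    // assumes the default configuration's initial page size is 32 (so pages
    // hold 32, 64, 128, ... slots): addresses 0..32 should land on page 0,
    // 32..96 on page 1, and 96..224 on page 2.
    #[test]
    fn addr_index_page_boundaries() {
        fn index_of(addr: usize) -> usize {
            Addr::<cfg::DefaultConfig>::from_usize(addr).index()
        }
        assert_eq!(index_of(0), 0);
        assert_eq!(index_of(31), 0);
        assert_eq!(index_of(32), 1);
        assert_eq!(index_of(95), 1);
        assert_eq!(index_of(96), 2);
        assert_eq!(index_of(223), 2);
    }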
}