// lock_api/remutex.rs

1// Copyright 2018 Amanieu d'Antras
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::{
9    mutex::{RawMutex, RawMutexFair, RawMutexTimed},
10    GuardNoSend,
11};
12use core::{
13    cell::{Cell, UnsafeCell},
14    fmt,
15    marker::PhantomData,
16    mem,
17    num::NonZeroUsize,
18    ops::Deref,
19    sync::atomic::{AtomicUsize, Ordering},
20};
21
22#[cfg(feature = "arc_lock")]
23use alloc::sync::Arc;
24#[cfg(feature = "arc_lock")]
25use core::mem::ManuallyDrop;
26#[cfg(feature = "arc_lock")]
27use core::ptr;
28
29#[cfg(feature = "owning_ref")]
30use owning_ref::StableAddress;
31
32#[cfg(feature = "serde")]
33use serde::{Deserialize, Deserializer, Serialize, Serializer};
34
/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID. However the ID of a thread that has exited can be
/// re-used since that thread is no longer active.
///
/// The ID must be non-zero: `RawReentrantMutex` uses `0` as the sentinel value
/// for "no owner", which is why this returns `NonZeroUsize`.
pub unsafe trait GetThreadId {
    /// Initial value.
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.
    fn nonzero_thread_id(&self) -> NonZeroUsize;
}
56
/// A raw mutex type that wraps another raw mutex to provide reentrancy.
///
/// Although this has the same methods as the [`RawMutex`] trait, it does
/// not implement it, and should not be used in the same way, since this
/// mutex can successfully acquire a lock multiple times in the same thread.
/// Only use this when you know you want a raw mutex that can be locked
/// reentrantly; you probably want [`ReentrantMutex`] instead.
pub struct RawReentrantMutex<R, G> {
    // Thread ID of the current owner, or 0 when the mutex is unlocked.
    owner: AtomicUsize,
    // Number of times the owning thread has acquired the lock; only accessed
    // by the thread that holds the inner mutex.
    lock_count: Cell<usize>,
    // The underlying non-reentrant mutex.
    mutex: R,
    // Helper used to obtain the current thread's non-zero ID.
    get_thread_id: G,
}
70
// SAFETY: sending/sharing the mutex only requires sending/sharing its
// components; the `Cell` is only touched by the thread holding the inner lock.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
73
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Initial value for an unlocked mutex.
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = RawReentrantMutex {
        owner: AtomicUsize::new(0),
        lock_count: Cell::new(0),
        mutex: R::INIT,
        get_thread_id: G::INIT,
    };

    // Common acquisition path shared by `lock`, `try_lock`, `try_lock_for` and
    // `try_lock_until`. `try_lock` performs the actual acquisition of the
    // inner mutex and reports whether it succeeded.
    //
    // `Relaxed` ordering on `owner` is sufficient: if the load observes our own
    // (unique, non-zero) thread ID, this thread already holds the inner mutex,
    // so `lock_count` is only ever touched by the current thread.
    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        if self.owner.load(Ordering::Relaxed) == id {
            // Re-entrant acquisition: just bump the counter. Overflow panics
            // rather than wrapping, which would otherwise let the mutex be
            // unlocked too early.
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            // First acquisition by this thread: record ownership.
            self.owner.store(id, Ordering::Relaxed);
            debug_assert_eq!(self.lock_count.get(), 0);
            self.lock_count.set(1);
        }
        true
    }

    /// Acquires this mutex, blocking if it's held by another thread.
    #[inline]
    pub fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    /// Attempts to acquire this mutex without blocking. Returns `true`
    /// if the lock was successfully acquired and `false` otherwise.
    #[inline]
    pub fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    /// Unlocks this mutex. The inner mutex may not be unlocked if
    /// this mutex was acquired previously in the current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Last re-entrant level released: clear ownership *before*
            // unlocking the inner mutex so a new owner never observes a
            // stale owner ID.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.mutex.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        self.owner.load(Ordering::Relaxed) == id
    }
}
150
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
    /// may not be unlocked if this mutex was acquired previously in the
    /// current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock_fair(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Same as `unlock`, but hands the inner mutex to a waiting thread
            // (if any) via the fair protocol.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        }
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn bump(&self) {
        // Only yield at the outermost lock level: with nested acquisitions the
        // inner mutex cannot be released, so bumping would be unsound.
        if self.lock_count.get() == 1 {
            let id = self.owner.load(Ordering::Relaxed);
            // Fully release our reentrancy bookkeeping around the inner bump,
            // then restore it once the inner mutex is re-acquired.
            self.owner.store(0, Ordering::Relaxed);
            self.lock_count.set(0);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
            self.lock_count.set(1);
        }
    }
}
190
impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Attempts to acquire this lock until a timeout instant is reached.
    ///
    /// A re-entrant acquisition by the owning thread always succeeds
    /// immediately without consulting the timeout.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
        self.lock_internal(|| self.mutex.try_lock_until(timeout))
    }

    /// Attempts to acquire this lock until the given timeout duration elapses.
    ///
    /// A re-entrant acquisition by the owning thread always succeeds
    /// immediately without consulting the timeout.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
        self.lock_internal(|| self.mutex.try_lock_for(timeout))
    }
}
204
/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead of
///   deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
///   Use a `RefCell` if you need this.
///
/// See [`Mutex`](crate::Mutex) for more details about the underlying mutex
/// primitive.
pub struct ReentrantMutex<R, G, T: ?Sized> {
    // The reentrancy-aware raw lock protecting `data`.
    raw: RawReentrantMutex<R, G>,
    // The protected value; `?Sized` so trailing-unsized types are supported.
    data: UnsafeCell<T>,
}
220
// SAFETY: only one thread can hold the lock (and thus reach the data) at a
// time, so sharing the mutex across threads requires `T: Send` but not
// `T: Sync` — the guard only ever hands out `&T` to the owning thread.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
    for ReentrantMutex<R, G, T>
{
}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
    for ReentrantMutex<R, G, T>
{
}
229
impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex in an unlocked state ready for use.
    // Two copies of `new` exist because `const fn` with trait bounds is only
    // available on sufficiently recent compilers; the build script sets
    // `has_const_fn_trait_bound` accordingly. Both bodies are identical.
    #[cfg(has_const_fn_trait_bound)]
    #[inline]
    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Creates a new reentrant mutex in an unlocked state ready for use.
    #[cfg(not(has_const_fn_trait_bound))]
    #[inline]
    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Consumes this mutex, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}
267
impl<R, G, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    // No trait bounds here, so this is `const fn` on all supported compilers.
    #[inline]
    pub const fn from_raw(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: raw_mutex,
                get_thread_id,
            },
        }
    }

    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    ///
    /// This allows creating a reentrant mutex in a constant context on stable
    /// Rust.
    ///
    /// This method is a legacy alias for [`from_raw`](Self::from_raw).
    #[inline]
    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        Self::from_raw(raw_mutex, get_thread_id, val)
    }
}
296
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Creates a new `ReentrantMutexGuard` without checking if the lock is held.
    ///
    /// # Safety
    ///
    /// This method must only be called if the thread logically holds the lock.
    ///
    /// Calling this function when a guard has already been produced is undefined behaviour unless
    /// the guard was forgotten with `mem::forget`.
    #[inline]
    pub unsafe fn make_guard_unchecked(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        ReentrantMutexGuard {
            // NOTE(review): `&self` is a double-borrow (`&&Self`) that coerces
            // to `&Self`; plain `self` would be equivalent and clearer.
            remutex: &self,
            marker: PhantomData,
        }
    }

    /// Acquires a reentrant mutex, blocking the current thread until it is able
    /// to do so.
    ///
    /// If the mutex is held by another thread then this function will block the
    /// local thread until it is available to acquire the mutex. If the mutex is
    /// already held by the current thread then this function will increment the
    /// lock reference count and return immediately. Upon returning,
    /// the thread is the only thread with the mutex held. An RAII guard is
    /// returned to allow scoped unlock of the lock. When the guard goes out of
    /// scope, the mutex will be unlocked.
    #[inline]
    pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
        self.raw.lock();
        // SAFETY: The lock is held, as required.
        unsafe { self.make_guard_unchecked() }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
    /// guard is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_guard_unchecked() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        // SAFETY: `&mut self` guarantees exclusive access to the data.
        unsafe { &mut *self.data.get() }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.raw.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        self.raw.is_owned_by_current_thread()
    }

    /// Forcibly unlocks the mutex.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock(&self) {
        self.raw.unlock();
    }

    /// Returns the underlying raw mutex object.
    ///
    /// Note that you will most likely need to import the `RawMutex` trait from
    /// `lock_api` to be able to call functions on the raw mutex.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking a mutex while
    /// still holding a reference to a `ReentrantMutexGuard`.
    #[inline]
    pub unsafe fn raw(&self) -> &R {
        &self.raw.mutex
    }

    /// Returns a raw pointer to the underlying data.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
    /// when dealing with FFI.
    ///
    /// # Safety
    ///
    /// You must ensure that there are no data races when dereferencing the
    /// returned pointer, for example if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using
    /// `mem::forget`.
    #[inline]
    pub fn data_ptr(&self) -> *mut T {
        self.data.get()
    }

    /// Creates a new `ArcReentrantMutexGuard` without checking if the lock is held.
    ///
    /// # Safety
    ///
    /// This method must only be called if the thread logically holds the lock.
    ///
    /// Calling this function when a guard has already been produced is undefined behaviour unless
    /// the guard was forgotten with `mem::forget`.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub unsafe fn make_arc_guard_unchecked(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
        // Cloning the Arc keeps the mutex alive for the guard's lifetime.
        ArcReentrantMutexGuard {
            remutex: self.clone(),
            marker: PhantomData,
        }
    }

    /// Acquires a reentrant mutex through an `Arc`.
    ///
    /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an
    /// `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
        self.raw.lock();
        // SAFETY: locking guarantee is upheld
        unsafe { self.make_arc_guard_unchecked() }
    }

    /// Attempts to acquire a reentrant mutex through an `Arc`.
    ///
    /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside
    /// of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock() {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_guard_unchecked() })
        } else {
            None
        }
    }
}
460
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        self.raw.unlock_fair();
    }
}
478
impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    ///
    /// A re-entrant acquisition by the owning thread succeeds immediately.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
    /// be unlocked when the guard is dropped.
    ///
    /// A re-entrant acquisition by the owning thread succeeds immediately.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
        if self.raw.try_lock_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc_for(
        self: &Arc<Self>,
        timeout: R::Duration,
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock_for(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be
    /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_lock_arc_until(
        self: &Arc<Self>,
        timeout: R::Instant,
    ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock_until(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_guard_unchecked() })
        } else {
            None
        }
    }
}
546
547impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
548    #[inline]
549    fn default() -> ReentrantMutex<R, G, T> {
550        ReentrantMutex::new(Default::default())
551    }
552}
553
554impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
555    #[inline]
556    fn from(t: T) -> ReentrantMutex<R, G, T> {
557        ReentrantMutex::new(t)
558    }
559}
560
561impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
562    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
563        match self.try_lock() {
564            Some(guard) => f
565                .debug_struct("ReentrantMutex")
566                .field("data", &&*guard)
567                .finish(),
568            None => {
569                struct LockedPlaceholder;
570                impl fmt::Debug for LockedPlaceholder {
571                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
572                        f.write_str("<locked>")
573                    }
574                }
575
576                f.debug_struct("ReentrantMutex")
577                    .field("data", &LockedPlaceholder)
578                    .finish()
579            }
580        }
581    }
582}
583
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Serialize + ?Sized,
{
    /// Serializes the protected data, holding the lock for the duration of
    /// the serialization.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let guard = self.lock();
        guard.serialize(serializer)
    }
}
599
#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Deserialize<'de> + ?Sized,
{
    /// Deserializes a `T` and wraps it in a new, unlocked mutex.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = T::deserialize(deserializer)?;
        Ok(ReentrantMutex::new(value))
    }
}
614
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The mutex this guard will unlock on drop.
    remutex: &'a ReentrantMutex<R, G, T>,
    // `GuardNoSend` makes the guard `!Send`: lock ownership is tracked per
    // thread, so the guard must be dropped on the thread that acquired it.
    marker: PhantomData<(&'a T, GuardNoSend)>,
}
626
// SAFETY: the guard only exposes `&T`, so sharing it across threads is sound
// when `T` (and the lock internals) are `Sync`.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for ReentrantMutexGuard<'a, R, G, T>
{
}
631
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
    /// Returns a reference to the original `ReentrantMutex` object.
    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
        s.remutex
    }

    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.remutex.raw;
        let data = f(unsafe { &*s.remutex.data.get() });
        // Forget `s` so its `Drop` does not unlock; ownership of the lock
        // transfers to the mapped guard returned below.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make  a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is return if the closure returns `None`.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.remutex.raw;
        let data = match f(unsafe { &*s.remutex.data.get() }) {
            Some(data) => data,
            // Closure declined: hand the untouched guard back to the caller.
            None => return Err(s),
        };
        // As in `map`: suppress `s`'s unlock, the mapped guard now owns it.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        // Re-lock even if `f` panics, so the guard's invariant is restored
        // before its `Drop` runs during unwinding.
        defer!(s.remutex.raw.lock());
        f()
    }
}
708
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    ReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // The lock is already released; forget the guard so its `Drop` does
        // not unlock a second time.
        mem::forget(s);
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// The mutex is unlocked a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Re-lock even if `f` panics, restoring the guard's invariant.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}
765
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for ReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    /// Provides shared access to the protected data for the guard's lifetime.
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: the guard holds the lock, so reading the data is race-free.
        unsafe { &*self.remutex.data.get() }
    }
}
775
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for ReentrantMutexGuard<'a, R, G, T>
{
    /// Releases one level of the reentrant lock when the guard goes out of scope.
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}
787
788impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
789    for ReentrantMutexGuard<'a, R, G, T>
790{
791    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
792        fmt::Debug::fmt(&**self, f)
793    }
794}
795
796impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
797    for ReentrantMutexGuard<'a, R, G, T>
798{
799    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
800        (**self).fmt(f)
801    }
802}
803
// SAFETY: the guard derefs to data stored inside the mutex itself, so the
// target address never changes while the guard exists (even if it is moved).
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for ReentrantMutexGuard<'a, R, G, T>
{
}
809
/// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`.
///
/// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the
/// `Mutex` it uses an `Arc<ReentrantMutex>`. This has several advantages, most notably that it has a `'static`
/// lifetime.
#[cfg(feature = "arc_lock")]
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ArcReentrantMutexGuard<R: RawMutex, G: GetThreadId, T: ?Sized> {
    // Owning handle to the mutex; keeps it alive for the guard's lifetime.
    remutex: Arc<ReentrantMutex<R, G, T>>,
    // `GuardNoSend` makes the guard `!Send`: ownership is tracked per thread.
    marker: PhantomData<GuardNoSend>,
}
822
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`.
    pub fn remutex(s: &Self) -> &Arc<ReentrantMutex<R, G, T>> {
        &s.remutex
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        // Re-lock even if `f` panics, restoring the guard's invariant.
        defer!(s.remutex.raw.lock());
        f()
    }
}
847
#[cfg(feature = "arc_lock")]
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }

        // SAFETY: ensure that the Arc's refcount is decremented
        // The guard's own `Drop` must not run (it would unlock again), but
        // the `Arc` field's destructor still must: wrap in `ManuallyDrop`
        // and drop only the `remutex` field in place.
        let mut s = ManuallyDrop::new(s);
        unsafe { ptr::drop_in_place(&mut s.remutex) };
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Re-lock even if `f` panics, restoring the guard's invariant.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}
892
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Deref for ArcReentrantMutexGuard<R, G, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: the guard holds the (reentrant) lock, so shared access to
        // the protected data is permitted for the guard's lifetime.
        let data = self.remutex.data.get();
        unsafe { &*data }
    }
}
901
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Drop for ArcReentrantMutexGuard<R, G, T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: this guard was created holding the lock and nothing has
        // released it since, so unlocking exactly once here is sound.
        unsafe { self.remutex.raw.unlock() }
    }
}
912
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The raw lock, released when this guard is dropped.
    raw: &'a RawReentrantMutex<R, G>,
    // Pointer to the (sub)field of the protected data exposed via `Deref`.
    data: *const T,
    // Ties the guard to the lifetime of the borrow it represents.
    marker: PhantomData<&'a T>,
}
927
// SAFETY: sharing the guard across threads only hands out `&T` (via `Deref`),
// which is sound because `T: Sync`; `R` and `G` must also be `Sync` since they
// are reachable through the shared `raw` reference.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}
932
933impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
934    MappedReentrantMutexGuard<'a, R, G, T>
935{
936    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
937    ///
938    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
939    /// in already locked the mutex.
940    ///
941    /// This is an associated function that needs to be
942    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
943    /// the same name on the contents of the locked data.
944    #[inline]
945    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
946    where
947        F: FnOnce(&T) -> &U,
948    {
949        let raw = s.raw;
950        let data = f(unsafe { &*s.data });
951        mem::forget(s);
952        MappedReentrantMutexGuard {
953            raw,
954            data,
955            marker: PhantomData,
956        }
957    }
958
959    /// Attempts to make  a new `MappedReentrantMutexGuard` for a component of the
960    /// locked data. The original guard is return if the closure returns `None`.
961    ///
962    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
963    /// in already locked the mutex.
964    ///
965    /// This is an associated function that needs to be
966    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
967    /// the same name on the contents of the locked data.
968    #[inline]
969    pub fn try_map<U: ?Sized, F>(
970        s: Self,
971        f: F,
972    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
973    where
974        F: FnOnce(&T) -> Option<&U>,
975    {
976        let raw = s.raw;
977        let data = match f(unsafe { &*s.data }) {
978            Some(data) => data,
979            None => return Err(s),
980        };
981        mem::forget(s);
982        Ok(MappedReentrantMutexGuard {
983            raw,
984            data,
985            marker: PhantomData,
986        })
987    }
988}
989
990impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
991    MappedReentrantMutexGuard<'a, R, G, T>
992{
993    /// Unlocks the mutex using a fair unlock protocol.
994    ///
995    /// By default, mutexes are unfair and allow the current thread to re-lock
996    /// the mutex before another has the chance to acquire the lock, even if
997    /// that thread has been blocked on the mutex for a long time. This is the
998    /// default because it allows much higher throughput as it avoids forcing a
999    /// context switch on every mutex unlock. This can result in one thread
1000    /// acquiring a mutex many more times than other threads.
1001    ///
1002    /// However in some cases it can be beneficial to ensure fairness by forcing
1003    /// the lock to pass on to a waiting thread if there is one. This is done by
1004    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
1005    #[inline]
1006    pub fn unlock_fair(s: Self) {
1007        // Safety: A MappedReentrantMutexGuard always holds the lock
1008        unsafe {
1009            s.raw.unlock_fair();
1010        }
1011        mem::forget(s);
1012    }
1013}
1014
1015impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
1016    for MappedReentrantMutexGuard<'a, R, G, T>
1017{
1018    type Target = T;
1019    #[inline]
1020    fn deref(&self) -> &T {
1021        unsafe { &*self.data }
1022    }
1023}
1024
1025impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
1026    for MappedReentrantMutexGuard<'a, R, G, T>
1027{
1028    #[inline]
1029    fn drop(&mut self) {
1030        // Safety: A MappedReentrantMutexGuard always holds the lock.
1031        unsafe {
1032            self.raw.unlock();
1033        }
1034    }
1035}
1036
1037impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
1038    for MappedReentrantMutexGuard<'a, R, G, T>
1039{
1040    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1041        fmt::Debug::fmt(&**self, f)
1042    }
1043}
1044
1045impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1046    for MappedReentrantMutexGuard<'a, R, G, T>
1047{
1048    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1049        (**self).fmt(f)
1050    }
1051}
1052
#[cfg(feature = "owning_ref")]
// SAFETY: `Deref` returns `&*self.data`, a pointer stored in the guard; it is
// not recomputed, so the referenced address stays stable even if the guard
// value itself is moved.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}