spin/
once.rs

//! Synchronization primitives for one-time evaluation.

use crate::{
    atomic::{AtomicU8, Ordering},
    RelaxStrategy, Spin,
};
use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit};

/// A primitive that provides lazy one-time initialization.
///
/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
/// `Once`).
///
/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static START: spin::Once = spin::Once::new();
///
/// START.call_once(|| {
///     // run initialization here
/// });
/// ```
pub struct Once<T = (), R = Spin> {
    phantom: PhantomData<R>,
    status: AtomicStatus,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T, R> Default for Once<T, R> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut d = f.debug_tuple("Once");
        let d = if let Some(x) = self.get() {
            d.field(&x)
        } else {
            d.field(&format_args!("<uninit>"))
        };
        d.finish()
    }
}

// Same unsafe impls as `std::sync::RwLock`, because this also allows for
// concurrent reads.
unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
unsafe impl<T: Send, R> Send for Once<T, R> {}

mod status {
    use super::*;

    // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
    // a value for which there exists a valid Status. This means that users of this API must only
    // be allowed to load and store `Status`es.
    #[repr(transparent)]
    pub struct AtomicStatus(AtomicU8);

    // Four states that a Once can be in, encoded into the lower bits of `status` in
    // the Once structure.
    #[repr(u8)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    pub enum Status {
        Incomplete = 0x00,
        Running = 0x01,
        Complete = 0x02,
        Panicked = 0x03,
    }
    impl Status {
        // Construct a status from an inner u8 integer.
        //
        // # Safety
        //
        // For this to be safe, the inner number must have a valid corresponding enum variant.
        unsafe fn new_unchecked(inner: u8) -> Self {
            core::mem::transmute(inner)
        }
    }

    impl AtomicStatus {
        #[inline(always)]
        pub const fn new(status: Status) -> Self {
            // SAFETY: We got the value directly from status, so transmuting back is fine.
            Self(AtomicU8::new(status as u8))
        }
        #[inline(always)]
        pub fn load(&self, ordering: Ordering) -> Status {
            // SAFETY: We know that the inner integer must have been constructed from a Status in
            // the first place.
            unsafe { Status::new_unchecked(self.0.load(ordering)) }
        }
        #[inline(always)]
        pub fn store(&self, status: Status, ordering: Ordering) {
            // SAFETY: While this method is not itself unsafe, it upholds the invariant: the
            // value being stored comes directly from a valid Status.
            self.0.store(status as u8, ordering);
        }
        #[inline(always)]
        pub fn compare_exchange(
            &self,
            old: Status,
            new: Status,
            success: Ordering,
            failure: Ordering,
        ) -> Result<Status, Status> {
            match self
                .0
                .compare_exchange(old as u8, new as u8, success, failure)
            {
                // SAFETY: A compare exchange will always return a value that was previously
                // stored into the atomic u8, but due to the invariant that it must be a valid
                // Status, we know that both Ok(_) and Err(_) will be safely transmutable.
                Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
                Err(err) => Err(unsafe { Status::new_unchecked(err) }),
            }
        }
        #[inline(always)]
        pub fn get_mut(&mut self) -> &mut Status {
            // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
            // it to a &mut Status.
            unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
        }
    }
}
use self::status::{AtomicStatus, Status};

impl<T, R: RelaxStrategy> Once<T, R> {
    /// Performs an initialization routine once and only once. The given closure
    /// will be executed if this is the first time `call_once` has been called,
    /// and otherwise the routine will *not* be invoked.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns, it is guaranteed that some initialization
    /// has run and completed (it may not be the closure specified). The
    /// returned reference will point to the result from the closure that was
    /// run.
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
    ///
    /// # Examples
    ///
    /// ```
    /// use spin;
    ///
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// fn get_cached_val() -> usize {
    ///     *INIT.call_once(expensive_computation)
    /// }
    ///
    /// fn expensive_computation() -> usize {
    ///     // ...
    /// # 2
    /// }
    /// ```
    pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
        match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
            Ok(x) => x,
            Err(void) => match void {},
        }
    }

    /// This method is similar to `call_once`, but allows the given closure to
    /// fail, and leaves the `Once` in an uninitialized state if it does.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns without error, it is guaranteed that some
    /// initialization has run and completed (it may not be the closure
    /// specified). The returned reference will point to the result from the
    /// closure that was run.
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
    ///
    /// # Examples
    ///
    /// ```
    /// use spin;
    ///
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// fn get_cached_val() -> Result<usize, String> {
    ///     INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
    /// }
    ///
    /// fn expensive_fallible_computation() -> Result<usize, String> {
    ///     // ...
    /// # Ok(2)
    /// }
    /// ```
    pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
        if let Some(value) = self.get() {
            Ok(value)
        } else {
            self.try_call_once_slow(f)
        }
    }

    #[cold]
    fn try_call_once_slow<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
        loop {
            let xchg = self.status.compare_exchange(
                Status::Incomplete,
                Status::Running,
                Ordering::Acquire,
                Ordering::Acquire,
            );

            match xchg {
                Ok(_must_be_state_incomplete) => {
                    // Impl is defined after the match for readability
                }
                Err(Status::Panicked) => panic!("Once panicked"),
                Err(Status::Running) => match self.poll() {
                    Some(v) => return Ok(v),
                    None => continue,
                },
                Err(Status::Complete) => {
                    return Ok(unsafe {
                        // SAFETY: The status is Complete
                        self.force_get()
                    });
                }
                Err(Status::Incomplete) => {
                    // The compare_exchange failed, so this shouldn't ever be reached. However,
                    // if we decide to switch to compare_exchange_weak it will be safer to leave
                    // this here than to hit an unreachable.
                    continue;
                }
            }

            // The compare-exchange succeeded, so we shall initialize it.

            // We use a guard (Finish) to catch panics caused by the builder closure.
            let finish = Finish {
                status: &self.status,
            };
            let val = match f() {
                Ok(val) => val,
                Err(err) => {
                    // If an error occurs, clean up everything and leave.
                    core::mem::forget(finish);
                    self.status.store(Status::Incomplete, Ordering::Release);
                    return Err(err);
                }
            };
            unsafe {
                // SAFETY:
                // `UnsafeCell`/deref: currently the only accessor, mutably
                // and immutably by cas exclusion.
                // `write`: pointer comes from `MaybeUninit`.
                (*self.data.get()).as_mut_ptr().write(val);
            };
            // If there were to be a panic with unwind enabled, the code would
            // short-circuit and never reach the point where it writes the inner data.
            // The destructor for Finish will run, and poison the Once to ensure that other
            // threads accessing it do not exhibit unwanted behavior, if there were to be
            // any inconsistency in data structures caused by the panicking thread.
            //
            // However, f() is expected in the general case not to panic. In that case, we
            // simply forget the guard, bypassing its destructor. We could theoretically
            // clear a flag instead, but this eliminates the call to the destructor at
            // compile time, and unconditionally poisons during an eventual panic, if
            // unwinding is enabled.
            core::mem::forget(finish);

            // SAFETY: Release is required here, so that all memory accesses done in the
            // closure during initialization become visible to other threads that perform
            // Acquire loads.
            //
            // We also know that the changes this thread has made will not vanish from its
            // own cache, so the store does not need to be AcqRel.
            self.status.store(Status::Complete, Ordering::Release);

            // This next line is mainly an optimization.
            return unsafe { Ok(self.force_get()) };
        }
    }

    /// Spins until the [`Once`] contains a value.
    ///
    /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
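    ///
    /// # Examples
    ///
    /// A minimal sketch of one thread blocking on another's initialization:
    ///
    /// ```
    /// use spin;
    ///
    /// static VALUE: spin::Once<u32> = spin::Once::new();
    ///
    /// let t = std::thread::spawn(|| {
    ///     // Spins until the main thread has stored a value.
    ///     assert_eq!(*VALUE.wait(), 42);
    /// });
    ///
    /// VALUE.call_once(|| 42);
    /// t.join().unwrap();
    /// ```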
    pub fn wait(&self) -> &T {
        loop {
            match self.poll() {
                Some(x) => break x,
                None => R::relax(),
            }
        }
    }

    /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
    /// initialized. If initialization has not even begun, `None` will be returned.
    ///
    /// Note that in releases prior to `0.7`, this function was named `wait`.
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
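    ///
    /// # Examples
    ///
    /// A minimal sketch of non-blocking inspection:
    ///
    /// ```
    /// use spin;
    ///
    /// static VALUE: spin::Once<u32> = spin::Once::new();
    ///
    /// assert!(VALUE.poll().is_none()); // Initialization has not yet begun.
    /// VALUE.call_once(|| 7);
    /// assert_eq!(VALUE.poll().copied(), Some(7));
    /// ```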
    pub fn poll(&self) -> Option<&T> {
        loop {
            // SAFETY: Acquire is safe here, because if the status is COMPLETE, we want to make
            // sure that all memory accesses done while initializing that value are visible when
            // we return a reference to the inner data after this load.
            match self.status.load(Ordering::Acquire) {
                Status::Incomplete => return None,
                Status::Running => R::relax(), // We spin
                Status::Complete => return Some(unsafe { self.force_get() }),
                Status::Panicked => panic!("Once previously poisoned by a panic"),
            }
        }
    }
}

impl<T, R> Once<T, R> {
    /// Initialization constant of [`Once`].
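    ///
    /// # Examples
    ///
    /// A minimal sketch, mirroring [`Once::new`]:
    ///
    /// ```
    /// use spin;
    ///
    /// static O: spin::Once = spin::Once::INIT;
    ///
    /// O.call_once(|| {
    ///     // run initialization here
    /// });
    /// ```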
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = Self {
        phantom: PhantomData,
        status: AtomicStatus::new(Status::Incomplete),
        data: UnsafeCell::new(MaybeUninit::uninit()),
    };

    /// Creates a new [`Once`].
    pub const fn new() -> Self {
        Self::INIT
    }

    /// Creates a new initialized [`Once`].
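    ///
    /// # Examples
    ///
    /// A minimal sketch of a static that starts out initialized:
    ///
    /// ```
    /// use spin;
    ///
    /// static VALUE: spin::Once<u32> = spin::Once::initialized(42);
    ///
    /// assert_eq!(VALUE.get(), Some(&42));
    /// ```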
    pub const fn initialized(data: T) -> Self {
        Self {
            phantom: PhantomData,
            status: AtomicStatus::new(Status::Complete),
            data: UnsafeCell::new(MaybeUninit::new(data)),
        }
    }

    /// Retrieve a pointer to the inner data.
    ///
    /// While this method itself is safe, accessing the pointer before the [`Once`] has been
    /// initialized is UB, unless the underlying data has already been written through a
    /// pointer obtained from this method.
    pub fn as_mut_ptr(&self) -> *mut T {
        // SAFETY:
        // * MaybeUninit<T> always has exactly the same layout as T
        self.data.get().cast::<T>()
    }

    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
    unsafe fn force_get(&self) -> &T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        &*(*self.data.get()).as_ptr()
    }

    /// Get a mutable reference to the initialized instance. Must only be called once COMPLETE.
    unsafe fn force_get_mut(&mut self) -> &mut T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        &mut *(*self.data.get()).as_mut_ptr()
    }

    /// Consume the [`Once`] and return the initialized inner value. Must only be called once COMPLETE.
    unsafe fn force_into_inner(self) -> T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        (*self.data.get()).as_ptr().read()
    }

    /// Returns a reference to the inner value if the [`Once`] has been initialized.
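    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use spin;
    ///
    /// static VALUE: spin::Once<u32> = spin::Once::new();
    ///
    /// assert!(VALUE.get().is_none());
    /// VALUE.call_once(|| 92);
    /// assert_eq!(VALUE.get(), Some(&92));
    /// ```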
    pub fn get(&self) -> Option<&T> {
        // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
        // nonatomic stores done when initializing, once we have loaded and checked the status.
        match self.status.load(Ordering::Acquire) {
            Status::Complete => Some(unsafe { self.force_get() }),
            _ => None,
        }
    }

    /// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
    /// checking initialization is unacceptable and the `Once` has already been initialized.
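    ///
    /// # Examples
    ///
    /// A minimal sketch; pairing this with [`is_completed`](Self::is_completed) keeps the
    /// call sound:
    ///
    /// ```
    /// use spin;
    ///
    /// static VALUE: spin::Once<u32> = spin::Once::new();
    ///
    /// VALUE.call_once(|| 3);
    /// if VALUE.is_completed() {
    ///     // SAFETY: completion was just checked with Acquire ordering.
    ///     assert_eq!(unsafe { *VALUE.get_unchecked() }, 3);
    /// }
    /// ```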
    pub unsafe fn get_unchecked(&self) -> &T {
        debug_assert_eq!(
            self.status.load(Ordering::SeqCst),
            Status::Complete,
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
        );
        self.force_get()
    }

    /// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
    ///
    /// Because this method requires a mutable reference to the [`Once`], no synchronization
    /// overhead is required to access the inner value. In effect, it is zero-cost.
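    ///
    /// # Examples
    ///
    /// A minimal sketch; exclusive access means no atomic loads are needed:
    ///
    /// ```
    /// use spin;
    ///
    /// let mut once = spin::Once::new();
    /// once.call_once(|| 1);
    /// if let Some(x) = once.get_mut() {
    ///     *x += 1;
    /// }
    /// assert_eq!(once.get(), Some(&2));
    /// ```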
    pub fn get_mut(&mut self) -> Option<&mut T> {
        match *self.status.get_mut() {
            Status::Complete => Some(unsafe { self.force_get_mut() }),
            _ => None,
        }
    }

    /// Returns a mutable reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
    /// checking initialization is unacceptable and the `Once` has already been initialized.
    pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
        debug_assert_eq!(
            self.status.load(Ordering::SeqCst),
            Status::Complete,
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
        );
        self.force_get_mut()
    }

    /// Returns the inner value if the [`Once`] has been initialized.
    ///
    /// Because this method requires ownership of the [`Once`], no synchronization overhead
    /// is required to access the inner value. In effect, it is zero-cost.
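    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use spin;
    ///
    /// let once = spin::Once::new();
    /// once.call_once(|| String::from("hello"));
    /// assert_eq!(once.try_into_inner(), Some(String::from("hello")));
    ///
    /// let empty = spin::Once::<String>::new();
    /// assert!(empty.try_into_inner().is_none());
    /// ```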
    pub fn try_into_inner(mut self) -> Option<T> {
        match *self.status.get_mut() {
            Status::Complete => Some(unsafe { self.force_into_inner() }),
            _ => None,
        }
    }

    /// Returns the inner value on the unchecked assumption that the [`Once`] has been initialized.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the `Once` has not already been initialized because
    /// uninitialized memory will be read, immediately triggering undefined behaviour.
    /// However, this can be useful if the `Once` has already been initialized and you want
    /// to bypass an option check.
    pub unsafe fn into_inner_unchecked(self) -> T {
        debug_assert_eq!(
            self.status.load(Ordering::SeqCst),
            Status::Complete,
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
        );
        self.force_into_inner()
    }

    /// Checks whether the value has been initialized.
    ///
    /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
    /// therefore it is safe to access the value directly via
    /// [`get_unchecked`](Self::get_unchecked) if this returns true.
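    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use spin;
    ///
    /// static VALUE: spin::Once<u32> = spin::Once::new();
    ///
    /// assert!(!VALUE.is_completed());
    /// VALUE.call_once(|| 1);
    /// assert!(VALUE.is_completed());
    /// ```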
    pub fn is_completed(&self) -> bool {
        // TODO: Add a similar variant for Relaxed?
        self.status.load(Ordering::Acquire) == Status::Complete
    }
}

impl<T, R> From<T> for Once<T, R> {
    fn from(data: T) -> Self {
        Self::initialized(data)
    }
}

impl<T, R> Drop for Once<T, R> {
    fn drop(&mut self) {
        // No need to do any atomic access here, we have &mut!
        if *self.status.get_mut() == Status::Complete {
            unsafe {
                // TODO: Use MaybeUninit::assume_init_drop once stabilised
                core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
            }
        }
    }
}

struct Finish<'a> {
    status: &'a AtomicStatus,
}

impl<'a> Drop for Finish<'a> {
    fn drop(&mut self) {
        // While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
        // This is mainly because panics are not meant to be fast at all, but also because if
        // there were to be a compiler bug which reorders accesses within the same thread,
        // where it should not, we want to be sure that the panic really is handled, and does
        // not cause additional problems. SeqCst will therefore help guard against such
        // bugs.
        self.status.store(Status::Panicked, Ordering::SeqCst);
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::AtomicU32;
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    #[test]
    fn smoke_once() {
        static O: Once = Once::new();
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }

    #[test]
    fn smoke_once_value() {
        static O: Once<usize> = Once::new();
        let a = O.call_once(|| 1);
        assert_eq!(*a, 1);
        let b = O.call_once(|| 2);
        assert_eq!(*b, 1);
    }

    #[test]
    fn stampede_once() {
        static O: Once = Once::new();
        static mut RUN: bool = false;

        let (tx, rx) = channel();
        let mut ts = Vec::new();
        for _ in 0..10 {
            let tx = tx.clone();
            ts.push(thread::spawn(move || {
                for _ in 0..4 {
                    thread::yield_now()
                }
                unsafe {
                    O.call_once(|| {
                        assert!(!RUN);
                        RUN = true;
                    });
                    assert!(RUN);
                }
                tx.send(()).unwrap();
            }));
        }

        unsafe {
            O.call_once(|| {
                assert!(!RUN);
                RUN = true;
            });
            assert!(RUN);
        }

        for _ in 0..10 {
            rx.recv().unwrap();
        }

        for t in ts {
            t.join().unwrap();
        }
    }

    #[test]
    fn get() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.get().is_none());
        INIT.call_once(|| 2);
        assert_eq!(INIT.get().map(|r| *r), Some(2));
    }

    #[test]
    fn get_no_wait() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.get().is_none());
        let t = thread::spawn(move || {
            INIT.call_once(|| {
                thread::sleep(std::time::Duration::from_secs(3));
                42
            });
        });
        assert!(INIT.get().is_none());

        t.join().unwrap();
    }

    #[test]
    fn poll() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.poll().is_none());
        INIT.call_once(|| 3);
        assert_eq!(INIT.poll().map(|r| *r), Some(3));
    }

    #[test]
    fn wait() {
        static INIT: Once<usize> = Once::new();

        let t = std::thread::spawn(|| {
            assert_eq!(*INIT.wait(), 3);
            assert!(INIT.is_completed());
        });

        for _ in 0..4 {
            thread::yield_now()
        }

        assert!(INIT.poll().is_none());
        INIT.call_once(|| 3);

        t.join().unwrap();
    }

    #[test]
    fn panic() {
        use std::panic;

        static INIT: Once = Once::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| panic!());
        });
        assert!(t.is_err());

        // poisoning propagates
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| {});
        });
        assert!(t.is_err());
    }

    #[test]
    fn init_constant() {
        static O: Once = Once::INIT;
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }

    static mut CALLED: bool = false;

    struct DropTest {}

    impl Drop for DropTest {
        fn drop(&mut self) {
            unsafe {
                CALLED = true;
            }
        }
    }

    #[test]
    fn try_call_once_err() {
        let once = Once::<_, Spin>::new();
        let shared = Arc::new((once, AtomicU32::new(0)));

        let (tx, rx) = channel();

        let t0 = {
            let shared = shared.clone();
            thread::spawn(move || {
                let (once, called) = &*shared;

                once.try_call_once(|| {
                    called.fetch_add(1, Ordering::AcqRel);
                    tx.send(()).unwrap();
                    thread::sleep(std::time::Duration::from_millis(50));
                    Err(())
                })
                .ok();
            })
        };

        let t1 = {
            let shared = shared.clone();
            thread::spawn(move || {
                rx.recv().unwrap();
                let (once, called) = &*shared;
                assert_eq!(
                    called.load(Ordering::Acquire),
                    1,
                    "leader thread did not run first"
                );

                once.call_once(|| {
                    called.fetch_add(1, Ordering::AcqRel);
                });
            })
        };

        t0.join().unwrap();
        t1.join().unwrap();

        assert_eq!(shared.1.load(Ordering::Acquire), 2);
    }

    // This is sort of two test cases, but if we write them as separate test methods
    // they can be executed concurrently and then fail some small fraction of the
    // time.
    #[test]
    fn drop_occurs_and_skip_uninit_drop() {
        unsafe {
            CALLED = false;
        }

        {
            let once = Once::<_>::new();
            once.call_once(|| DropTest {});
        }

        assert!(unsafe { CALLED });
        // Now test that we skip drops for the uninitialized case.
        unsafe {
            CALLED = false;
        }

        let once = Once::<DropTest>::new();
        drop(once);

        assert!(unsafe { !CALLED });
    }

    #[test]
    fn call_once_test() {
        for _ in 0..20 {
            use std::sync::atomic::AtomicUsize;
            use std::sync::Arc;
            use std::time::Duration;
            let share = Arc::new(AtomicUsize::new(0));
            let once = Arc::new(Once::<_, Spin>::new());
            let mut hs = Vec::new();
            for _ in 0..8 {
                let h = thread::spawn({
                    let share = share.clone();
                    let once = once.clone();
                    move || {
                        thread::sleep(Duration::from_millis(10));
                        once.call_once(|| {
                            share.fetch_add(1, Ordering::SeqCst);
                        });
                    }
                });
                hs.push(h);
            }
            for h in hs {
                h.join().unwrap();
            }
            assert_eq!(1, share.load(Ordering::SeqCst));
        }
    }
}