// tracing_subscriber/registry/sharded.rs

1use sharded_slab::{pool::Ref, Clear, Pool};
2use thread_local::ThreadLocal;
3
4use super::stack::SpanStack;
5use crate::{
6    filter::{FilterId, FilterMap, FilterState},
7    registry::{
8        extensions::{Extensions, ExtensionsInner, ExtensionsMut},
9        LookupSpan, SpanData,
10    },
11    sync::RwLock,
12};
13use std::{
14    cell::{self, Cell, RefCell},
15    sync::atomic::{fence, AtomicUsize, Ordering},
16};
17use tracing_core::{
18    dispatcher::{self, Dispatch},
19    span::{self, Current, Id},
20    Event, Interest, Metadata, Subscriber,
21};
22
/// A shared, reusable store for spans.
///
/// A `Registry` is a [`Subscriber`] around which multiple [`Layer`]s
/// implementing various behaviors may be [added]. Unlike other types
/// implementing `Subscriber`, `Registry` does not actually record traces itself:
/// instead, it collects and stores span data that is exposed to any [`Layer`]s
/// wrapping it through implementations of the [`LookupSpan`] trait.
/// The `Registry` is responsible for storing span metadata, recording
/// relationships between spans, and tracking which spans are active and which
/// are closed. In addition, it provides a mechanism for [`Layer`]s to store
/// user-defined per-span data, called [extensions], in the registry. This
/// allows [`Layer`]-specific data to benefit from the `Registry`'s
/// high-performance concurrent storage.
///
/// This registry is implemented using a [lock-free sharded slab][slab], and is
/// highly optimized for concurrent access.
///
/// # Span ID Generation
///
/// Span IDs are not globally unique, but the registry ensures that
/// no two currently active spans have the same ID within a process.
///
/// One of the primary responsibilities of the registry is to generate [span
/// IDs]. Therefore, it's important for other code that interacts with the
/// registry, such as [`Layer`]s, to understand the guarantees of the
/// span IDs that are generated.
///
/// The registry's span IDs are guaranteed to be unique **at a given point
/// in time**. This means that an active span will never be assigned the
/// same ID as another **currently active** span. However, the registry
/// **will** eventually reuse the IDs of [closed] spans, although an ID
/// will never be reassigned immediately after a span has closed.
///
/// Spans are not [considered closed] by the `Registry` until *every*
/// [`Span`] reference with that ID has been dropped.
///
/// Thus: span IDs generated by the registry should be considered unique
/// only at a given point in time, and only relative to other spans
/// generated by the same process. Two spans with the same ID will not exist
/// in the same process concurrently. However, if historical span data is
/// being stored, the same ID may be used for multiple spans in that
/// data. If spans must be uniquely identified in historical data, the user
/// code storing this data must assign its own unique identifiers to those
/// spans. A counter is generally sufficient for this.
///
/// Similarly, span IDs generated by the registry are not unique outside of
/// a given process. Distributed tracing systems may require identifiers
/// that are unique across multiple processes on multiple machines (for
/// example, [OpenTelemetry's `SpanId`s and `TraceId`s][ot]). `tracing` span
/// IDs generated by the registry should **not** be used for this purpose.
/// Instead, code which integrates with a distributed tracing system should
/// generate and propagate its own IDs according to the rules specified by
/// the distributed tracing system. These IDs can be associated with
/// `tracing` spans using [fields] and/or [stored span data].
///
/// [span IDs]: tracing_core::span::Id
/// [slab]: sharded_slab
/// [`Layer`]: crate::Layer
/// [added]: crate::layer::Layer#composing-layers
/// [extensions]: super::Extensions
/// [closed]: https://docs.rs/tracing/latest/tracing/span/index.html#closing-spans
/// [considered closed]: tracing_core::subscriber::Subscriber::try_close()
/// [`Span`]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html
/// [ot]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#spancontext
/// [fields]: tracing_core::field
/// [stored span data]: crate::registry::SpanData::extensions_mut
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
#[derive(Debug)]
pub struct Registry {
    // Pooled storage for per-span data; cleared slots are reused for new spans.
    spans: Pool<DataInner>,
    // Per-thread stack of entered spans, used to resolve the contextual
    // "current" span.
    current_spans: ThreadLocal<RefCell<SpanStack>>,
    // The ID that will be assigned to the next per-layer filter registered
    // via `LookupSpan::register_filter`.
    next_filter_id: u8,
}
97
/// Span data stored in a [`Registry`].
///
/// The registry stores well-known data defined by tracing: span relationships,
/// metadata and reference counts. Additional user-defined data provided by
/// [`Layer`s], such as formatted fields, metrics, or distributed traces should
/// be stored in the [extensions] typemap.
///
/// [`Layer`s]: crate::layer::Layer
/// [extensions]: Extensions
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
#[derive(Debug)]
pub struct Data<'a> {
    /// Immutable reference to the pooled `DataInner` entry. Holding this guard
    /// keeps the slab slot borrowed for the lifetime of this `Data`.
    inner: Ref<'a, DataInner>,
}
114
/// Stored data associated with a span.
///
/// This type is pooled using [`sharded_slab::Pool`]; when a span is
/// dropped, the `DataInner` entry at that span's slab index is cleared
/// in place and reused by a future span. Thus, the `Default` and
/// [`sharded_slab::Clear`] implementations for this type are
/// load-bearing.
#[derive(Debug)]
struct DataInner {
    // Records which per-layer filters enabled this span at creation time.
    filter_map: FilterMap,
    // Static metadata describing the span's callsite.
    metadata: &'static Metadata<'static>,
    // The parent span's ID, if any. Holding this ID counts as a reference to
    // the parent, keeping it open until this span is cleared (see the `Clear`
    // impl below).
    parent: Option<Id>,
    // Number of outstanding references to this span; see `Registry::try_close`
    // and `Registry::clone_span`.
    ref_count: AtomicUsize,
    // The span's `Extensions` typemap. Allocations for the `HashMap` backing
    // this are pooled and reused in place.
    pub(crate) extensions: RwLock<ExtensionsInner>,
}
132
133// === impl Registry ===
134
135impl Default for Registry {
136    fn default() -> Self {
137        Self {
138            spans: Pool::new(),
139            current_spans: ThreadLocal::new(),
140            next_filter_id: 0,
141        }
142    }
143}
144
145#[inline]
146fn idx_to_id(idx: usize) -> Id {
147    Id::from_u64(idx as u64 + 1)
148}
149
150#[inline]
151fn id_to_idx(id: &Id) -> usize {
152    id.into_u64() as usize - 1
153}
154
/// A guard that tracks how many [`Registry`]-backed `Layer`s have
/// processed an `on_close` event.
///
/// This is needed to enable a [`Registry`]-backed Layer to access span
/// data after the `Layer` has received the `on_close` callback.
///
/// Once all `Layer`s have processed this event, the [`Registry`] knows
/// that it is able to safely remove the span tracked by `id`. `CloseGuard`
/// accomplishes this through a two-step process:
/// 1. Whenever a [`Registry`]-backed `Layer::on_close` method is
///    called, `Registry::start_close` is called.
///    `Registry::start_close` increments a thread-local `CLOSE_COUNT`
///    by 1 and returns a `CloseGuard`.
/// 2. The `CloseGuard` is dropped at the end of `Layer::on_close`. On
///    drop, `CloseGuard` checks thread-local `CLOSE_COUNT`. If
///    `CLOSE_COUNT` is 0, the `CloseGuard` removes the span with the
///    `id` from the registry, as all `Layers` that might have seen the
///    `on_close` notification have processed it. If `CLOSE_COUNT` is
///    greater than 0, `CloseGuard` decrements the counter by one and
///    _does not_ remove the span from the [`Registry`].
///
pub(crate) struct CloseGuard<'a> {
    // The ID of the span whose close is being processed.
    id: Id,
    // The registry from which the span's slot may be cleared on drop.
    registry: &'a Registry,
    // When `true` (set via `set_closing`), the last guard to drop will clear
    // the span's slot from the registry.
    is_closing: bool,
}
181
182impl Registry {
183    fn get(&self, id: &Id) -> Option<Ref<'_, DataInner>> {
184        self.spans.get(id_to_idx(id))
185    }
186
187    /// Returns a guard which tracks how many `Layer`s have
188    /// processed an `on_close` notification via the `CLOSE_COUNT` thread-local.
189    /// For additional details, see [`CloseGuard`].
190    ///
191    pub(crate) fn start_close(&self, id: Id) -> CloseGuard<'_> {
192        CLOSE_COUNT.with(|count| {
193            let c = count.get();
194            count.set(c + 1);
195        });
196        CloseGuard {
197            id,
198            registry: self,
199            is_closing: false,
200        }
201    }
202
203    pub(crate) fn has_per_layer_filters(&self) -> bool {
204        self.next_filter_id > 0
205    }
206
207    pub(crate) fn span_stack(&self) -> cell::Ref<'_, SpanStack> {
208        self.current_spans.get_or_default().borrow()
209    }
210}
211
thread_local! {
    /// `CLOSE_COUNT` is the thread-local counter used by `CloseGuard` to
    /// track how many layers have processed the close.
    /// For additional details, see [`CloseGuard`].
    ///
    // A plain `Cell` suffices here: being thread-local, the counter is only
    // ever accessed from the owning thread.
    static CLOSE_COUNT: Cell<usize> = const { Cell::new(0) };
}
219
impl Subscriber for Registry {
    /// Delegates the callsite `Interest` to any per-layer filters; if none are
    /// registered, the registry itself is always interested.
    fn register_callsite(&self, _: &'static Metadata<'static>) -> Interest {
        if self.has_per_layer_filters() {
            return FilterState::take_interest().unwrap_or_else(Interest::always);
        }

        Interest::always()
    }

    /// The registry performs no filtering of its own; per-layer filters (if
    /// any) decide instead.
    fn enabled(&self, _: &Metadata<'_>) -> bool {
        if self.has_per_layer_filters() {
            return FilterState::event_enabled();
        }
        true
    }

    #[inline]
    fn new_span(&self, attrs: &span::Attributes<'_>) -> span::Id {
        // Determine the parent: none for an explicit root, the contextual
        // current span, or an explicitly specified parent. Cloning the parent
        // span holds a ref to it for as long as this span is alive.
        let parent = if attrs.is_root() {
            None
        } else if attrs.is_contextual() {
            self.current_span().id().map(|id| self.clone_span(id))
        } else {
            attrs.parent().map(|id| self.clone_span(id))
        };

        let id = self
            .spans
            // Check out a `DataInner` entry from the pool for the new span. If
            // there are free entries already allocated in the pool, this will
            // preferentially reuse one; otherwise, a new `DataInner` is
            // allocated and added to the pool.
            .create_with(|data| {
                data.metadata = attrs.metadata();
                data.parent = parent;
                data.filter_map = crate::filter::FILTERING.with(|filtering| filtering.filter_map());
                #[cfg(debug_assertions)]
                {
                    if data.filter_map != FilterMap::new() {
                        debug_assert!(self.has_per_layer_filters());
                    }
                }

                // A reused entry must have been fully cleared, so its ref
                // count starts from zero.
                let refs = data.ref_count.get_mut();
                debug_assert_eq!(*refs, 0);
                *refs = 1;
            })
            .expect("Unable to allocate another span");
        idx_to_id(id)
    }

    /// This is intentionally not implemented, as recording fields
    /// on a span is the responsibility of layers atop of this registry.
    #[inline]
    fn record(&self, _: &span::Id, _: &span::Record<'_>) {}

    fn record_follows_from(&self, _span: &span::Id, _follows: &span::Id) {}

    /// The registry performs no event filtering of its own; per-layer filters
    /// (if any) decide instead.
    fn event_enabled(&self, _event: &Event<'_>) -> bool {
        if self.has_per_layer_filters() {
            return FilterState::event_enabled();
        }
        true
    }

    /// This is intentionally not implemented, as recording events
    /// is the responsibility of layers atop of this registry.
    fn event(&self, _: &Event<'_>) {}

    /// Pushes the span onto this thread's span stack.
    fn enter(&self, id: &span::Id) {
        if self
            .current_spans
            .get_or_default()
            .borrow_mut()
            .push(id.clone())
        {
            // If `push` indicates the span was not already on the stack,
            // acquire an additional ref to keep it alive while it is entered.
            self.clone_span(id);
        }
    }

    /// Pops the span from this thread's span stack.
    fn exit(&self, id: &span::Id) {
        if let Some(spans) = self.current_spans.get() {
            if spans.borrow_mut().pop(id) {
                // Release the ref acquired in `enter`. This goes through the
                // default dispatcher (rather than `self.try_close`) so that
                // layers wrapping the registry also observe the close.
                dispatcher::get_default(|dispatch| dispatch.try_close(id.clone()));
            }
        }
    }

    /// Increments the reference count of the span with the given `id`.
    ///
    /// # Panics
    ///
    /// Panics if no span exists for `id`, or if the span has already closed.
    fn clone_span(&self, id: &span::Id) -> span::Id {
        let span = self
            .get(id)
            .unwrap_or_else(|| panic!(
                "tried to clone {:?}, but no span exists with that ID\n\
                This may be caused by consuming a parent span (`parent: span`) rather than borrowing it (`parent: &span`).",
                id,
            ));
        // Like `std::sync::Arc`, adds to the ref count (on clone) don't require
        // a strong ordering; if we call `clone_span`, the reference count must
        // always be at least 1. The only synchronization necessary is between
        // calls to `try_close`: we have to ensure that all threads have
        // dropped their refs to the span before the span is closed.
        let refs = span.ref_count.fetch_add(1, Ordering::Relaxed);
        assert_ne!(
            refs, 0,
            "tried to clone a span ({:?}) that already closed",
            id
        );
        id.clone()
    }

    /// Returns the span at the top of this thread's span stack, if any.
    fn current_span(&self) -> Current {
        self.current_spans
            .get()
            .and_then(|spans| {
                let spans = spans.borrow();
                let id = spans.current()?;
                let span = self.get(id)?;
                Some(Current::new(id.clone(), span.metadata))
            })
            .unwrap_or_else(Current::none)
    }

    /// Decrements the reference count of the span with the given `id`, and
    /// removes the span if it is zero.
    ///
    /// The allocated span slot will be reused when a new span is created.
    fn try_close(&self, id: span::Id) -> bool {
        let span = match self.get(&id) {
            Some(span) => span,
            // While panicking, a missing span is tolerated to avoid a
            // double-panic during unwinding.
            None if std::thread::panicking() => return false,
            None => panic!("tried to drop a ref to {:?}, but no such span exists!", id),
        };

        let refs = span.ref_count.fetch_sub(1, Ordering::Release);
        if !std::thread::panicking() {
            // `fetch_sub` returns the *previous* value; `usize::MAX` means the
            // count was already 0 and has now wrapped around.
            assert!(refs < usize::MAX, "reference count overflow!");
        }
        if refs > 1 {
            return false;
        }

        // Synchronize if we are actually removing the span (stolen
        // from std::Arc); this ensures that all other `try_close` calls on
        // other threads happen-before we actually remove the span.
        fence(Ordering::Acquire);
        true
    }
}
368
369impl<'a> LookupSpan<'a> for Registry {
370    type Data = Data<'a>;
371
372    fn span_data(&'a self, id: &Id) -> Option<Self::Data> {
373        let inner = self.get(id)?;
374        Some(Data { inner })
375    }
376
377    fn register_filter(&mut self) -> FilterId {
378        let id = FilterId::new(self.next_filter_id);
379        self.next_filter_id += 1;
380        id
381    }
382}
383
384// === impl CloseGuard ===
385
impl CloseGuard<'_> {
    /// Marks the span tracked by this guard as actually closing, permitting
    /// the last guard to drop to clear the span's slot from the registry
    /// (see the `Drop` impl below).
    pub(crate) fn set_closing(&mut self) {
        self.is_closing = true;
    }
}
391
impl Drop for CloseGuard<'_> {
    fn drop(&mut self) {
        // If this returns with an error, we are already panicking. At
        // this point, there's nothing we can really do to recover
        // except by avoiding a double-panic (hence `try_with` rather than
        // `with`, which would panic if the thread-local were destroyed).
        let _ = CLOSE_COUNT.try_with(|count| {
            let c = count.get();
            // Decrement the count to indicate that _this_ guard's
            // `on_close` callback has completed.
            //
            // Note that we *must* do this before we actually remove the span
            // from the registry, since dropping the `DataInner` may trigger a
            // new close, if this span is the last reference to a parent span.
            count.set(c - 1);

            // If the current close count is 1, this stack frame is the last
            // `on_close` call. If the span is closing, it's okay to remove the
            // span.
            if c == 1 && self.is_closing {
                // Clearing the slot runs `DataInner::clear` in place, which
                // releases this span's ref on its parent and empties its
                // extensions.
                self.registry.spans.clear(id_to_idx(&self.id));
            }
        });
    }
}
416
417// === impl Data ===
418
419impl<'a> SpanData<'a> for Data<'a> {
420    fn id(&self) -> Id {
421        idx_to_id(self.inner.key())
422    }
423
424    fn metadata(&self) -> &'static Metadata<'static> {
425        self.inner.metadata
426    }
427
428    fn parent(&self) -> Option<&Id> {
429        self.inner.parent.as_ref()
430    }
431
432    fn extensions(&self) -> Extensions<'_> {
433        Extensions::new(self.inner.extensions.read().expect("Mutex poisoned"))
434    }
435
436    fn extensions_mut(&self) -> ExtensionsMut<'_> {
437        ExtensionsMut::new(self.inner.extensions.write().expect("Mutex poisoned"))
438    }
439
440    #[inline]
441    fn is_enabled_for(&self, filter: FilterId) -> bool {
442        self.inner.filter_map.is_enabled(filter)
443    }
444}
445
446// === impl DataInner ===
447
impl Default for DataInner {
    /// Builds the "empty" entry that populates freshly-allocated pool slots.
    /// Real span data is written over these placeholder values by
    /// `Registry::new_span` before the entry is ever exposed.
    fn default() -> Self {
        // Since `DataInner` owns a `&'static Callsite` pointer, we need
        // something to use as the initial default value for that callsite.
        // Since we can't access a `DataInner` until it has had actual span data
        // inserted into it, the null metadata will never actually be accessed.
        struct NullCallsite;
        impl tracing_core::callsite::Callsite for NullCallsite {
            fn set_interest(&self, _: Interest) {
                unreachable!(
                    "/!\\ Tried to register the null callsite /!\\\n \
                    This should never have happened and is definitely a bug. \
                    A `tracing` bug report would be appreciated."
                )
            }

            fn metadata(&self) -> &Metadata<'_> {
                unreachable!(
                    "/!\\ Tried to access the null callsite's metadata /!\\\n \
                    This should never have happened and is definitely a bug. \
                    A `tracing` bug report would be appreciated."
                )
            }
        }

        static NULL_CALLSITE: NullCallsite = NullCallsite;
        static NULL_METADATA: Metadata<'static> = tracing_core::metadata! {
            name: "",
            target: "",
            level: tracing_core::Level::TRACE,
            fields: &[],
            callsite: &NULL_CALLSITE,
            kind: tracing_core::metadata::Kind::SPAN,
        };

        Self {
            filter_map: FilterMap::new(),
            metadata: &NULL_METADATA,
            parent: None,
            ref_count: AtomicUsize::new(0),
            extensions: RwLock::new(ExtensionsInner::new()),
        }
    }
}
492
impl Clear for DataInner {
    /// Clears the span's data in place, dropping the parent's reference count.
    fn clear(&mut self) {
        // A span is not considered closed until all of its children have closed.
        // Therefore, each span's `DataInner` holds a "reference" to the parent
        // span, keeping the parent span open until all its children have closed.
        // When we close a span, we must then decrement the parent's ref count
        // (potentially, allowing it to close, if this child is the last reference
        // to that span).
        // Checking `is_some()` first lets root spans (which have no parent)
        // skip the thread-local dispatcher access entirely; the `Option` is
        // then taken outside the `get_default` closure, since that closure is
        // a `FnMut` and cannot move `self.parent` out directly.
        if self.parent.is_some() {
            // Note that --- because `Layered::try_close` works by calling
            // `try_close` on the inner subscriber and using the return value to
            // determine whether to call the `Layer`'s `on_close` callback ---
            // we must call `try_close` on the entire subscriber stack, rather
            // than just on the registry. If the registry called `try_close` on
            // itself directly, the layers wouldn't see the close notification.
            let subscriber = dispatcher::get_default(Dispatch::clone);
            if let Some(parent) = self.parent.take() {
                let _ = subscriber.try_close(parent);
            }
        }

        // Clear (but do not deallocate!) the pooled `HashMap` for the span's extensions.
        self.extensions
            .get_mut()
            .unwrap_or_else(|l| {
                // This function can be called in a `Drop` impl, such as while
                // panicking, so ignore lock poisoning.
                l.into_inner()
            })
            .clear();

        // Reset the filter map so the reused slot starts with no per-layer
        // filter results.
        self.filter_map = FilterMap::new();
    }
}
532
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{layer::Context, registry::LookupSpan, Layer};
    use std::{
        collections::HashMap,
        sync::{Arc, Mutex, Weak},
    };
    use tracing::{self, subscriber::with_default};
    use tracing_core::{
        dispatcher,
        span::{Attributes, Id},
        Subscriber,
    };

    /// A layer asserting that span data is still accessible from `on_close`.
    struct AssertionLayer;
    impl<S> Layer<S> for AssertionLayer
    where
        S: Subscriber + for<'a> LookupSpan<'a>,
    {
        fn on_close(&self, id: Id, ctx: Context<'_, S>) {
            dbg!(format_args!("closing {:?}", id));
            assert!(&ctx.span(&id).is_some());
        }
    }

    #[test]
    fn single_layer_can_access_closed_span() {
        let subscriber = AssertionLayer.with_subscriber(Registry::default());

        with_default(subscriber, || {
            let span = tracing::debug_span!("span");
            drop(span);
        });
    }

    #[test]
    fn multiple_layers_can_access_closed_span() {
        let subscriber = AssertionLayer
            .and_then(AssertionLayer)
            .with_subscriber(Registry::default());

        with_default(subscriber, || {
            let span = tracing::debug_span!("span");
            drop(span);
        });
    }

    /// A layer recording, by name, which spans have been opened and closed.
    struct CloseLayer {
        inner: Arc<Mutex<CloseState>>,
    }

    /// A handle for asserting on the state recorded by a [`CloseLayer`].
    struct CloseHandle {
        state: Arc<Mutex<CloseState>>,
    }

    #[derive(Default)]
    struct CloseState {
        // Spans that have been opened but not yet closed. The `Weak` upgrades
        // successfully for as long as the span's extensions (which hold the
        // corresponding `Arc`) have not been cleared from the registry.
        open: HashMap<&'static str, Weak<()>>,
        // Spans that have been closed, in close order.
        closed: Vec<(&'static str, Weak<()>)>,
    }

    #[allow(dead_code)] // Field is exercised via checking `Arc::downgrade()`
    struct SetRemoved(Arc<()>);

    impl<S> Layer<S> for CloseLayer
    where
        S: Subscriber + for<'a> LookupSpan<'a>,
    {
        fn on_new_span(&self, _: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
            let span = ctx.span(id).expect("Missing span; this is a bug");
            let mut lock = self.inner.lock().unwrap();
            let is_removed = Arc::new(());
            assert!(
                lock.open
                    .insert(span.name(), Arc::downgrade(&is_removed))
                    .is_none(),
                "test layer saw multiple spans with the same name, the test is probably messed up"
            );
            let mut extensions = span.extensions_mut();
            // Storing the `Arc` in the span's extensions means the `Weak`s in
            // `CloseState` stop upgrading exactly when the registry clears the
            // span's slot (which clears its extensions).
            extensions.insert(SetRemoved(is_removed));
        }

        fn on_close(&self, id: Id, ctx: Context<'_, S>) {
            let span = if let Some(span) = ctx.span(&id) {
                span
            } else {
                println!(
                    "span {:?} did not exist in `on_close`, are we panicking?",
                    id
                );
                return;
            };
            let name = span.name();
            println!("close {} ({:?})", name, id);
            if let Ok(mut lock) = self.inner.lock() {
                if let Some(is_removed) = lock.open.remove(name) {
                    // The span must not have been removed from the registry
                    // before its `on_close` callback completes.
                    assert!(is_removed.upgrade().is_some());
                    lock.closed.push((name, is_removed));
                }
            }
        }
    }

    impl CloseLayer {
        fn new() -> (Self, CloseHandle) {
            let state = Arc::new(Mutex::new(CloseState::default()));
            (
                Self {
                    inner: state.clone(),
                },
                CloseHandle { state },
            )
        }
    }

    impl CloseState {
        fn is_open(&self, span: &str) -> bool {
            self.open.contains_key(span)
        }

        fn is_closed(&self, span: &str) -> bool {
            self.closed.iter().any(|(name, _)| name == &span)
        }
    }

    impl CloseHandle {
        fn assert_closed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            assert!(
                lock.is_closed(span),
                "expected {} to be closed{}",
                span,
                if lock.is_open(span) {
                    " (it was still open)"
                } else {
                    ", but it never existed (is there a problem with the test?)"
                }
            )
        }

        fn assert_open(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            assert!(
                lock.is_open(span),
                "expected {} to be open{}",
                span,
                // This branch means the span has already closed, so say so
                // (previously this printed " (it was still open)", which is
                // the wrong diagnostic for an expected-open assertion).
                if lock.is_closed(span) {
                    " (it was closed)"
                } else {
                    ", but it never existed (is there a problem with the test?)"
                }
            )
        }

        fn assert_removed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
                Some((_, is_removed)) => is_removed,
                // A span absent from `closed` can never be `is_closed`, so
                // check `is_open` to distinguish "still open" from "never
                // existed" (previously this checked `is_closed`, which is
                // always false on this path).
                None => panic!(
                    "expected {} to be removed from the registry, but it was not closed{}",
                    span,
                    if lock.is_open(span) {
                        " (it was still open)"
                    } else {
                        ", but it never existed (is there a problem with the test?)"
                    }
                ),
            };
            assert!(
                is_removed.upgrade().is_none(),
                "expected {} to have been removed from the registry",
                span
            )
        }

        fn assert_not_removed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
                Some((_, is_removed)) => is_removed,
                None if lock.is_open(span) => return,
                None => unreachable!(),
            };
            assert!(
                is_removed.upgrade().is_some(),
                // This assertion checks the span was *not* removed; the
                // failure message must not claim the opposite expectation.
                "expected {} to *not* have been removed from the registry",
                span
            )
        }

        #[allow(unused)] // may want this for future tests
        fn assert_last_closed(&self, span: Option<&str>) {
            let lock = self.state.lock().unwrap();
            let last = lock.closed.last().map(|(span, _)| span);
            assert_eq!(
                last,
                span.as_ref(),
                "expected {:?} to have closed last",
                span
            );
        }

        fn assert_closed_in_order(&self, order: impl AsRef<[&'static str]>) {
            let lock = self.state.lock().unwrap();
            let order = order.as_ref();
            for (i, name) in order.iter().enumerate() {
                assert_eq!(
                    lock.closed.get(i).map(|(span, _)| span),
                    Some(name),
                    "expected close order: {:?}, actual: {:?}",
                    order,
                    lock.closed.iter().map(|(name, _)| name).collect::<Vec<_>>()
                );
            }
        }
    }

    #[test]
    fn spans_are_removed_from_registry() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span = tracing::debug_span!("span1");
            drop(span);
            let span = tracing::info_span!("span2");
            drop(span);
        });

        state.assert_removed("span1");
        state.assert_removed("span2");

        // Ensure the registry itself outlives the span.
        drop(dispatch);
    }

    #[test]
    fn spans_are_only_closed_when_the_last_ref_drops() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        let span2 = dispatcher::with_default(&dispatch, || {
            let span = tracing::debug_span!("span1");
            drop(span);
            let span2 = tracing::info_span!("span2");
            let span2_clone = span2.clone();
            drop(span2);
            span2_clone
        });

        state.assert_removed("span1");
        state.assert_not_removed("span2");

        drop(span2);
        state.assert_removed("span1");
        // Now that the last reference to `span2` has been dropped, it should
        // have been closed and removed as well.
        state.assert_removed("span2");

        // Ensure the registry itself outlives the span.
        drop(dispatch);
    }

    #[test]
    fn span_enter_guards_are_dropped_out_of_order() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::debug_span!("span1");
            let span2 = tracing::info_span!("span2");

            let enter1 = span1.enter();
            let enter2 = span2.enter();

            drop(enter1);
            drop(span1);

            state.assert_removed("span1");
            state.assert_not_removed("span2");

            drop(enter2);
            state.assert_not_removed("span2");

            drop(span2);
            state.assert_removed("span1");
            state.assert_removed("span2");
        });
    }

    #[test]
    fn child_closes_parent() {
        // This test asserts that if a parent span's handle is dropped before
        // a child span's handle, the parent will remain open until child
        // closes, and will then be closed.

        let (close_layer, state) = CloseLayer::new();
        let subscriber = close_layer.with_subscriber(Registry::default());

        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::info_span!("parent");
            let span2 = tracing::info_span!(parent: &span1, "child");

            state.assert_open("parent");
            state.assert_open("child");

            drop(span1);
            state.assert_open("parent");
            state.assert_open("child");

            drop(span2);
            state.assert_closed("parent");
            state.assert_closed("child");
        });
    }

    #[test]
    fn child_closes_grandparent() {
        // This test asserts that, when a span is kept open by a child which
        // is *itself* kept open by a child, closing the grandchild will close
        // both the parent *and* the grandparent.
        let (close_layer, state) = CloseLayer::new();
        let subscriber = close_layer.with_subscriber(Registry::default());

        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::info_span!("grandparent");
            let span2 = tracing::info_span!(parent: &span1, "parent");
            let span3 = tracing::info_span!(parent: &span2, "child");

            state.assert_open("grandparent");
            state.assert_open("parent");
            state.assert_open("child");

            drop(span1);
            drop(span2);
            state.assert_open("grandparent");
            state.assert_open("parent");
            state.assert_open("child");

            drop(span3);

            state.assert_closed_in_order(["child", "parent", "grandparent"]);
        });
    }
}