use sharded_slab::{pool::Ref, Clear, Pool};
use thread_local::ThreadLocal;

use super::stack::SpanStack;
use crate::{
    filter::{FilterId, FilterMap, FilterState},
    registry::{
        extensions::{Extensions, ExtensionsInner, ExtensionsMut},
        LookupSpan, SpanData,
    },
    sync::RwLock,
};
use std::{
    cell::{self, Cell, RefCell},
    sync::atomic::{fence, AtomicUsize, Ordering},
};
use tracing_core::{
    dispatcher::{self, Dispatch},
    span::{self, Current, Id},
    Event, Interest, Metadata, Subscriber,
};

/// A shared, reusable store for spans.
///
/// A `Registry` is a [`Subscriber`] around which multiple [`Layer`]s
/// implementing various behaviors may be [added]. Unlike other types
/// implementing `Subscriber`, `Registry` does not actually record traces itself:
/// instead, it collects and stores span data that is exposed to any [`Layer`]s
/// wrapping it through implementations of the [`LookupSpan`] trait.
/// The `Registry` is responsible for storing span metadata, recording
/// relationships between spans, and tracking which spans are active and which
/// are closed. In addition, it provides a mechanism for [`Layer`]s to store
/// user-defined per-span data, called [extensions], in the registry. This
/// allows [`Layer`]-specific data to benefit from the `Registry`'s
/// high-performance concurrent storage.
///
/// This registry is implemented using a [lock-free sharded slab][slab], and is
/// highly optimized for concurrent access.
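///
/// A minimal sketch of composing a `Registry` with a [`Layer`] (the no-op
/// layer here is a stand-in for real behavior such as formatting, filtering,
/// or metrics):
///
/// ```
/// use tracing_core::Subscriber;
/// use tracing_subscriber::{layer::SubscriberExt, Layer, Registry};
///
/// // A layer with no behavior of its own; it could still look up span data
/// // stored by the registry through `LookupSpan`.
/// struct NoopLayer;
/// impl<S: Subscriber> Layer<S> for NoopLayer {}
///
/// let subscriber = Registry::default().with(NoopLayer);
/// tracing::subscriber::with_default(subscriber, || {
///     // Spans created here are stored in the registry.
///     let span = tracing::info_span!("my_span");
///     let _guard = span.enter();
/// });
/// ```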
///
/// # Span ID Generation
///
/// Span IDs are not globally unique, but the registry ensures that no two
/// currently active spans have the same ID within a process.
///
/// One of the primary responsibilities of the registry is to generate [span
/// IDs]. Therefore, it's important for other code that interacts with the
/// registry, such as [`Layer`]s, to understand the guarantees of the
/// span IDs that are generated.
///
/// The registry's span IDs are guaranteed to be unique **at a given point
/// in time**. This means that an active span will never be assigned the
/// same ID as another **currently active** span. However, the registry
/// **will** eventually reuse the IDs of [closed] spans, although an ID
/// will never be reassigned immediately after a span has closed.
///
/// Spans are not [considered closed] by the `Registry` until *every*
/// [`Span`] reference with that ID has been dropped.
///
/// Thus: span IDs generated by the registry should be considered unique
/// only at a given point in time, and only relative to other spans
/// generated by the same process. Two spans with the same ID will not exist
/// in the same process concurrently. However, if historical span data is
/// being stored, the same ID may be used for multiple spans in that
/// data. If spans must be uniquely identified in historical data, the user
/// code storing this data must assign its own unique identifiers to those
/// spans. A counter is generally sufficient for this.
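///
/// For example, a minimal sketch (the counter here is hypothetical storage
/// code, not part of this crate) of assigning stored spans their own IDs:
///
/// ```
/// use std::sync::atomic::{AtomicU64, Ordering};
///
/// // Assign each recorded span its own monotonically increasing identifier,
/// // independent of the registry's (reusable) span IDs.
/// static NEXT_STORED_ID: AtomicU64 = AtomicU64::new(0);
///
/// fn next_stored_id() -> u64 {
///     NEXT_STORED_ID.fetch_add(1, Ordering::Relaxed)
/// }
///
/// assert_eq!(next_stored_id(), 0);
/// assert_eq!(next_stored_id(), 1);
/// ```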
///
/// Similarly, span IDs generated by the registry are not unique outside of
/// a given process. Distributed tracing systems may require identifiers
/// that are unique across multiple processes on multiple machines (for
/// example, [OpenTelemetry's `SpanId`s and `TraceId`s][ot]). `tracing` span
/// IDs generated by the registry should **not** be used for this purpose.
/// Instead, code which integrates with a distributed tracing system should
/// generate and propagate its own IDs according to the rules specified by
/// the distributed tracing system. These IDs can be associated with
/// `tracing` spans using [fields] and/or [stored span data].
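///
/// For example, a minimal sketch (the `trace_id` field name is illustrative,
/// not a convention defined by this crate) of carrying an externally
/// generated identifier on a span as a field:
///
/// ```
/// // An identifier generated according to the distributed tracing system's
/// // own rules (a hypothetical value here).
/// let external_trace_id: u64 = 0x1234_5678;
///
/// // Record it as a span field; a `Layer` exporting to the distributed
/// // tracing system can read it back from the span's recorded fields or
/// // stash it in the span's extensions.
/// let span = tracing::info_span!("request", trace_id = external_trace_id);
/// let _guard = span.enter();
/// ```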
///
/// [span IDs]: tracing_core::span::Id
/// [slab]: sharded_slab
/// [`Layer`]: crate::Layer
/// [added]: crate::layer::Layer#composing-layers
/// [extensions]: super::Extensions
/// [closed]: https://docs.rs/tracing/latest/tracing/span/index.html#closing-spans
/// [considered closed]: tracing_core::subscriber::Subscriber::try_close()
/// [`Span`]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html
/// [ot]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#spancontext
/// [fields]: tracing_core::field
/// [stored span data]: crate::registry::SpanData::extensions_mut
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
#[derive(Debug)]
pub struct Registry {
    spans: Pool<DataInner>,
    current_spans: ThreadLocal<RefCell<SpanStack>>,
    next_filter_id: u8,
}

/// Span data stored in a [`Registry`].
///
/// The registry stores well-known data defined by tracing: span relationships,
/// metadata and reference counts. Additional user-defined data provided by
/// [`Layer`s], such as formatted fields, metrics, or distributed traces should
/// be stored in the [extensions] typemap.
///
/// [`Layer`s]: crate::layer::Layer
/// [extensions]: Extensions
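///
/// A minimal sketch (the `StartTime` type and `TimingLayer` are hypothetical)
/// of a `Layer` storing its own per-span data in the extensions typemap:
///
/// ```
/// use tracing_core::{span, Subscriber};
/// use tracing_subscriber::{layer::Context, registry::LookupSpan, Layer};
///
/// // Hypothetical per-span data recorded by a layer.
/// struct StartTime(std::time::Instant);
///
/// struct TimingLayer;
///
/// impl<S> Layer<S> for TimingLayer
/// where
///     S: Subscriber + for<'a> LookupSpan<'a>,
/// {
///     fn on_new_span(&self, _: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) {
///         let span = ctx.span(id).expect("span must exist in `on_new_span`");
///         // Store the start time in the span's extensions typemap; other
///         // callbacks (e.g. `on_close`) can read it back later.
///         span.extensions_mut().insert(StartTime(std::time::Instant::now()));
///     }
/// }
/// ```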
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
#[derive(Debug)]
pub struct Data<'a> {
    /// Immutable reference to the pooled `DataInner` entry.
    inner: Ref<'a, DataInner>,
}

/// Stored data associated with a span.
///
/// This type is pooled using [`sharded_slab::Pool`]; when a span is
/// dropped, the `DataInner` entry at that span's slab index is cleared
/// in place and reused by a future span. Thus, the `Default` and
/// [`sharded_slab::Clear`] implementations for this type are
/// load-bearing.
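///
/// A minimal sketch (using a stand-in `Entry` type rather than `DataInner`
/// itself) of how the pool reuses cleared entries:
///
/// ```
/// use sharded_slab::{Clear, Pool};
///
/// // Pooled entries must be `Default` (to allocate fresh slots) and `Clear`
/// // (to reset them in place for reuse).
/// #[derive(Default)]
/// struct Entry {
///     values: Vec<u64>,
/// }
///
/// impl Clear for Entry {
///     fn clear(&mut self) {
///         // Empty the `Vec`, but keep its allocation for the next occupant.
///         self.values.clear();
///     }
/// }
///
/// let pool: Pool<Entry> = Pool::new();
///
/// // Check an entry out of the pool and initialize it (as `new_span` does
/// // with `create_with`).
/// let key = pool.create_with(|entry| entry.values.push(42)).unwrap();
/// assert_eq!(pool.get(key).unwrap().values, vec![42]);
///
/// // Clearing the slot resets the entry in place so a later span can reuse it.
/// pool.clear(key);
/// ```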
#[derive(Debug)]
struct DataInner {
    filter_map: FilterMap,
    metadata: &'static Metadata<'static>,
    parent: Option<Id>,
    ref_count: AtomicUsize,
    // The span's `Extensions` typemap. Allocations for the `HashMap` backing
    // this are pooled and reused in place.
    pub(crate) extensions: RwLock<ExtensionsInner>,
}

// === impl Registry ===

impl Default for Registry {
    fn default() -> Self {
        Self {
            spans: Pool::new(),
            current_spans: ThreadLocal::new(),
            next_filter_id: 0,
        }
    }
}

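// A span's `Id` is its index in the slab, offset by one (a `span::Id` must be
// nonzero, while slab indices start at zero).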
#[inline]
fn idx_to_id(idx: usize) -> Id {
    Id::from_u64(idx as u64 + 1)
}

#[inline]
fn id_to_idx(id: &Id) -> usize {
    id.into_u64() as usize - 1
}

/// A guard that tracks how many [`Registry`]-backed `Layer`s have
/// processed an `on_close` event.
///
/// This is needed to enable a [`Registry`]-backed `Layer` to access span
/// data after the `Layer` has received the `on_close` callback.
///
/// Once all `Layer`s have processed this event, the [`Registry`] knows
/// that it is able to safely remove the span tracked by `id`. `CloseGuard`
/// accomplishes this through a two-step process:
/// 1. Whenever a [`Registry`]-backed `Layer::on_close` method is
///    called, `Registry::start_close` is called.
///    `Registry::start_close` increments a thread-local `CLOSE_COUNT`
///    by 1 and returns a `CloseGuard`.
/// 2. The `CloseGuard` is dropped at the end of `Layer::on_close`. On
///    drop, `CloseGuard` checks the thread-local `CLOSE_COUNT`. If
///    `CLOSE_COUNT` is 0, the `CloseGuard` removes the span with the
///    `id` from the registry, as all `Layer`s that might have seen the
///    `on_close` notification have processed it. If `CLOSE_COUNT` is
///    greater than 0, `CloseGuard` decrements the counter by one and
///    _does not_ remove the span from the [`Registry`].
///
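/// A minimal sketch of the counting protocol (the names here are illustrative;
/// the real implementation lives in `start_close` and `Drop for CloseGuard`):
///
/// ```
/// use std::cell::Cell;
///
/// thread_local! {
///     static COUNT: Cell<usize> = Cell::new(0);
/// }
///
/// struct Guard;
///
/// fn start() -> Guard {
///     // Step 1: each nested `on_close` call increments the counter...
///     COUNT.with(|c| c.set(c.get() + 1));
///     Guard
/// }
///
/// impl Drop for Guard {
///     fn drop(&mut self) {
///         // ...and step 2: each guard decrements it again when dropped. Only
///         // the guard that sees the counter return to zero would actually
///         // remove the span from the registry.
///         COUNT.with(|c| {
///             let count = c.get();
///             c.set(count - 1);
///             if count == 1 {
///                 // Last `on_close` frame: safe to remove the span here.
///             }
///         });
///     }
/// }
///
/// let outer = start();
/// let inner = start();
/// drop(inner); // count 2 -> 1: other guards may still need the span
/// drop(outer); // count 1 -> 0: the span can be removed
/// ```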
pub(crate) struct CloseGuard<'a> {
    id: Id,
    registry: &'a Registry,
    is_closing: bool,
}

impl Registry {
    fn get(&self, id: &Id) -> Option<Ref<'_, DataInner>> {
        self.spans.get(id_to_idx(id))
    }

    /// Returns a guard which tracks how many `Layer`s have
    /// processed an `on_close` notification via the `CLOSE_COUNT` thread-local.
    /// For additional details, see [`CloseGuard`].
    ///
    pub(crate) fn start_close(&self, id: Id) -> CloseGuard<'_> {
        CLOSE_COUNT.with(|count| {
            let c = count.get();
            count.set(c + 1);
        });
        CloseGuard {
            id,
            registry: self,
            is_closing: false,
        }
    }

    pub(crate) fn has_per_layer_filters(&self) -> bool {
        self.next_filter_id > 0
    }

    pub(crate) fn span_stack(&self) -> cell::Ref<'_, SpanStack> {
        self.current_spans.get_or_default().borrow()
    }
}

thread_local! {
    /// `CLOSE_COUNT` is the thread-local counter used by `CloseGuard` to
    /// track how many layers have processed the close.
    /// For additional details, see [`CloseGuard`].
    ///
    static CLOSE_COUNT: Cell<usize> = Cell::new(0);
}

impl Subscriber for Registry {
    fn register_callsite(&self, _: &'static Metadata<'static>) -> Interest {
        if self.has_per_layer_filters() {
            return FilterState::take_interest().unwrap_or_else(Interest::always);
        }

        Interest::always()
    }

    fn enabled(&self, _: &Metadata<'_>) -> bool {
        if self.has_per_layer_filters() {
            return FilterState::event_enabled();
        }
        true
    }

    #[inline]
    fn new_span(&self, attrs: &span::Attributes<'_>) -> span::Id {
        let parent = if attrs.is_root() {
            None
        } else if attrs.is_contextual() {
            self.current_span().id().map(|id| self.clone_span(id))
        } else {
            attrs.parent().map(|id| self.clone_span(id))
        };

        let id = self
            .spans
            // Check out a `DataInner` entry from the pool for the new span. If
            // there are free entries already allocated in the pool, this will
            // preferentially reuse one; otherwise, a new `DataInner` is
            // allocated and added to the pool.
            .create_with(|data| {
                data.metadata = attrs.metadata();
                data.parent = parent;
                data.filter_map = crate::filter::FILTERING.with(|filtering| filtering.filter_map());
                #[cfg(debug_assertions)]
                {
                    if data.filter_map != FilterMap::default() {
                        debug_assert!(self.has_per_layer_filters());
                    }
                }

                let refs = data.ref_count.get_mut();
                debug_assert_eq!(*refs, 0);
                *refs = 1;
            })
            .expect("Unable to allocate another span");
        idx_to_id(id)
    }

    /// This is intentionally not implemented, as recording fields
    /// on a span is the responsibility of layers atop of this registry.
    #[inline]
    fn record(&self, _: &span::Id, _: &span::Record<'_>) {}

    fn record_follows_from(&self, _span: &span::Id, _follows: &span::Id) {}

    fn event_enabled(&self, _event: &Event<'_>) -> bool {
        if self.has_per_layer_filters() {
            return FilterState::event_enabled();
        }
        true
    }

    /// This is intentionally not implemented, as recording events
    /// is the responsibility of layers atop of this registry.
    fn event(&self, _: &Event<'_>) {}

    fn enter(&self, id: &span::Id) {
        if self
            .current_spans
            .get_or_default()
            .borrow_mut()
            .push(id.clone())
        {
            self.clone_span(id);
        }
    }

    fn exit(&self, id: &span::Id) {
        if let Some(spans) = self.current_spans.get() {
            if spans.borrow_mut().pop(id) {
                dispatcher::get_default(|dispatch| dispatch.try_close(id.clone()));
            }
        }
    }

    fn clone_span(&self, id: &span::Id) -> span::Id {
        let span = self
            .get(id)
            .unwrap_or_else(|| panic!(
                "tried to clone {:?}, but no span exists with that ID\n\
                This may be caused by consuming a parent span (`parent: span`) rather than borrowing it (`parent: &span`).",
                id,
            ));
        // Like `std::sync::Arc`, adds to the ref count (on clone) don't require
        // a strong ordering; if we call `clone_span`, the reference count must
        // always be at least 1. The only synchronization necessary is between
        // calls to `try_close`: we have to ensure that all threads have
        // dropped their refs to the span before the span is closed.
        let refs = span.ref_count.fetch_add(1, Ordering::Relaxed);
        assert_ne!(
            refs, 0,
            "tried to clone a span ({:?}) that already closed",
            id
        );
        id.clone()
    }

    fn current_span(&self) -> Current {
        self.current_spans
            .get()
            .and_then(|spans| {
                let spans = spans.borrow();
                let id = spans.current()?;
                let span = self.get(id)?;
                Some(Current::new(id.clone(), span.metadata))
            })
            .unwrap_or_else(Current::none)
    }

    /// Decrements the reference count of the span with the given `id`, and
    /// removes the span if it is zero.
    ///
    /// The allocated span slot will be reused when a new span is created.
    fn try_close(&self, id: span::Id) -> bool {
        let span = match self.get(&id) {
            Some(span) => span,
            None if std::thread::panicking() => return false,
            None => panic!("tried to drop a ref to {:?}, but no such span exists!", id),
        };

        let refs = span.ref_count.fetch_sub(1, Ordering::Release);
        if !std::thread::panicking() {
            assert!(refs < std::usize::MAX, "reference count overflow!");
        }
        if refs > 1 {
            return false;
        }

        // Synchronize if we are actually removing the span (stolen
        // from std::Arc); this ensures that all other `try_close` calls on
        // other threads happen-before we actually remove the span.
        fence(Ordering::Acquire);
        true
    }
}

impl<'a> LookupSpan<'a> for Registry {
    type Data = Data<'a>;

    fn span_data(&'a self, id: &Id) -> Option<Self::Data> {
        let inner = self.get(id)?;
        Some(Data { inner })
    }

    fn register_filter(&mut self) -> FilterId {
        let id = FilterId::new(self.next_filter_id);
        self.next_filter_id += 1;
        id
    }
}

// === impl CloseGuard ===

impl<'a> CloseGuard<'a> {
    pub(crate) fn set_closing(&mut self) {
        self.is_closing = true;
    }
}

impl<'a> Drop for CloseGuard<'a> {
    fn drop(&mut self) {
        // If this returns with an error, we are already panicking. At
        // this point, there's nothing we can really do to recover
        // except by avoiding a double-panic.
        let _ = CLOSE_COUNT.try_with(|count| {
            let c = count.get();
            // Decrement the count to indicate that _this_ guard's
            // `on_close` callback has completed.
            //
            // Note that we *must* do this before we actually remove the span
            // from the registry, since dropping the `DataInner` may trigger a
            // new close, if this span is the last reference to a parent span.
            count.set(c - 1);

            // If the current close count is 1, this stack frame is the last
            // `on_close` call. If the span is closing, it's okay to remove the
            // span.
            if c == 1 && self.is_closing {
                self.registry.spans.clear(id_to_idx(&self.id));
            }
        });
    }
}

// === impl Data ===

impl<'a> SpanData<'a> for Data<'a> {
    fn id(&self) -> Id {
        idx_to_id(self.inner.key())
    }

    fn metadata(&self) -> &'static Metadata<'static> {
        self.inner.metadata
    }

    fn parent(&self) -> Option<&Id> {
        self.inner.parent.as_ref()
    }

    fn extensions(&self) -> Extensions<'_> {
        Extensions::new(self.inner.extensions.read().expect("Mutex poisoned"))
    }

    fn extensions_mut(&self) -> ExtensionsMut<'_> {
        ExtensionsMut::new(self.inner.extensions.write().expect("Mutex poisoned"))
    }

    #[inline]
    fn is_enabled_for(&self, filter: FilterId) -> bool {
        self.inner.filter_map.is_enabled(filter)
    }
}

// === impl DataInner ===

impl Default for DataInner {
    fn default() -> Self {
        // Since `DataInner` owns a `&'static Callsite` pointer, we need
        // something to use as the initial default value for that callsite.
        // Since we can't access a `DataInner` until it has had actual span data
        // inserted into it, the null metadata will never actually be accessed.
        struct NullCallsite;
        impl tracing_core::callsite::Callsite for NullCallsite {
            fn set_interest(&self, _: Interest) {
                unreachable!(
                    "/!\\ Tried to register the null callsite /!\\\n \
                    This should never have happened and is definitely a bug. \
                    A `tracing` bug report would be appreciated."
                )
            }

            fn metadata(&self) -> &Metadata<'_> {
                unreachable!(
                    "/!\\ Tried to access the null callsite's metadata /!\\\n \
                    This should never have happened and is definitely a bug. \
                    A `tracing` bug report would be appreciated."
                )
            }
        }

        static NULL_CALLSITE: NullCallsite = NullCallsite;
        static NULL_METADATA: Metadata<'static> = tracing_core::metadata! {
            name: "",
            target: "",
            level: tracing_core::Level::TRACE,
            fields: &[],
            callsite: &NULL_CALLSITE,
            kind: tracing_core::metadata::Kind::SPAN,
        };

        Self {
            filter_map: FilterMap::default(),
            metadata: &NULL_METADATA,
            parent: None,
            ref_count: AtomicUsize::new(0),
            extensions: RwLock::new(ExtensionsInner::new()),
        }
    }
}

impl Clear for DataInner {
    /// Clears the span's data in place, dropping the parent's reference count.
    fn clear(&mut self) {
        // A span is not considered closed until all of its children have closed.
        // Therefore, each span's `DataInner` holds a "reference" to the parent
        // span, keeping the parent span open until all its children have closed.
        // When we close a span, we must then decrement the parent's ref count
        // (potentially, allowing it to close, if this child is the last reference
        // to that span).
        // We have to actually unpack the option inside the `get_default`
        // closure, since it is a `FnMut`, but testing that there _is_ a value
        // here lets us avoid the thread-local access if we don't need the
        // dispatcher at all.
        if self.parent.is_some() {
            // Note that --- because `Layered::try_close` works by calling
            // `try_close` on the inner subscriber and using the return value to
            // determine whether to call the `Layer`'s `on_close` callback ---
            // we must call `try_close` on the entire subscriber stack, rather
            // than just on the registry. If the registry called `try_close` on
            // itself directly, the layers wouldn't see the close notification.
            let subscriber = dispatcher::get_default(Dispatch::clone);
            if let Some(parent) = self.parent.take() {
                let _ = subscriber.try_close(parent);
            }
        }

        // Clear (but do not deallocate!) the pooled `HashMap` for the span's extensions.
        self.extensions
            .get_mut()
            .unwrap_or_else(|l| {
                // This function can be called in a `Drop` impl, such as while
                // panicking, so ignore lock poisoning.
                l.into_inner()
            })
            .clear();

        self.filter_map = FilterMap::default();
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{layer::Context, registry::LookupSpan, Layer};
    use std::{
        collections::HashMap,
        sync::{Arc, Mutex, Weak},
    };
    use tracing::{self, subscriber::with_default};
    use tracing_core::{
        dispatcher,
        span::{Attributes, Id},
        Subscriber,
    };

    #[derive(Debug)]
    struct DoesNothing;
    impl<S: Subscriber> Layer<S> for DoesNothing {}

    struct AssertionLayer;
    impl<S> Layer<S> for AssertionLayer
    where
        S: Subscriber + for<'a> LookupSpan<'a>,
    {
        fn on_close(&self, id: Id, ctx: Context<'_, S>) {
            dbg!(format_args!("closing {:?}", id));
            assert!(&ctx.span(&id).is_some());
        }
    }

    #[test]
    fn single_layer_can_access_closed_span() {
        let subscriber = AssertionLayer.with_subscriber(Registry::default());

        with_default(subscriber, || {
            let span = tracing::debug_span!("span");
            drop(span);
        });
    }

    #[test]
    fn multiple_layers_can_access_closed_span() {
        let subscriber = AssertionLayer
            .and_then(AssertionLayer)
            .with_subscriber(Registry::default());

        with_default(subscriber, || {
            let span = tracing::debug_span!("span");
            drop(span);
        });
    }

    struct CloseLayer {
        inner: Arc<Mutex<CloseState>>,
    }

    struct CloseHandle {
        state: Arc<Mutex<CloseState>>,
    }

    #[derive(Default)]
    struct CloseState {
        open: HashMap<&'static str, Weak<()>>,
        closed: Vec<(&'static str, Weak<()>)>,
    }

    struct SetRemoved(Arc<()>);

    impl<S> Layer<S> for CloseLayer
    where
        S: Subscriber + for<'a> LookupSpan<'a>,
    {
        fn on_new_span(&self, _: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
            let span = ctx.span(id).expect("Missing span; this is a bug");
            let mut lock = self.inner.lock().unwrap();
            let is_removed = Arc::new(());
            assert!(
                lock.open
                    .insert(span.name(), Arc::downgrade(&is_removed))
                    .is_none(),
                "test layer saw multiple spans with the same name, the test is probably messed up"
            );
            let mut extensions = span.extensions_mut();
            extensions.insert(SetRemoved(is_removed));
        }

        fn on_close(&self, id: Id, ctx: Context<'_, S>) {
            let span = if let Some(span) = ctx.span(&id) {
                span
            } else {
                println!(
                    "span {:?} did not exist in `on_close`, are we panicking?",
                    id
                );
                return;
            };
            let name = span.name();
            println!("close {} ({:?})", name, id);
            if let Ok(mut lock) = self.inner.lock() {
                if let Some(is_removed) = lock.open.remove(name) {
                    assert!(is_removed.upgrade().is_some());
                    lock.closed.push((name, is_removed));
                }
            }
        }
    }

    impl CloseLayer {
        fn new() -> (Self, CloseHandle) {
            let state = Arc::new(Mutex::new(CloseState::default()));
            (
                Self {
                    inner: state.clone(),
                },
                CloseHandle { state },
            )
        }
    }

    impl CloseState {
        fn is_open(&self, span: &str) -> bool {
            self.open.contains_key(span)
        }

        fn is_closed(&self, span: &str) -> bool {
            self.closed.iter().any(|(name, _)| name == &span)
        }
    }

    impl CloseHandle {
        fn assert_closed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            assert!(
                lock.is_closed(span),
                "expected {} to be closed{}",
                span,
                if lock.is_open(span) {
                    " (it was still open)"
                } else {
                    ", but it never existed (is there a problem with the test?)"
                }
            )
        }

        fn assert_open(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            assert!(
                lock.is_open(span),
                "expected {} to be open{}",
                span,
                if lock.is_closed(span) {
                    " (it was closed)"
                } else {
                    ", but it never existed (is there a problem with the test?)"
                }
            )
        }

        fn assert_removed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
                Some((_, is_removed)) => is_removed,
                None => panic!(
                    "expected {} to be removed from the registry, but it was not closed{}",
                    span,
                    if lock.is_open(span) {
                        " (it was still open)"
                    } else {
                        ", but it never existed (is there a problem with the test?)"
                    }
                ),
            };
            assert!(
                is_removed.upgrade().is_none(),
                "expected {} to have been removed from the registry",
                span
            )
        }

        fn assert_not_removed(&self, span: &str) {
            let lock = self.state.lock().unwrap();
            let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
                Some((_, is_removed)) => is_removed,
                None if lock.is_open(span) => return,
                None => unreachable!(),
            };
            assert!(
                is_removed.upgrade().is_some(),
                "expected {} to have not been removed from the registry",
                span
            )
        }

        #[allow(unused)] // may want this for future tests
        fn assert_last_closed(&self, span: Option<&str>) {
            let lock = self.state.lock().unwrap();
            let last = lock.closed.last().map(|(span, _)| span);
            assert_eq!(
                last,
                span.as_ref(),
                "expected {:?} to have closed last",
                span
            );
        }

        fn assert_closed_in_order(&self, order: impl AsRef<[&'static str]>) {
            let lock = self.state.lock().unwrap();
            let order = order.as_ref();
            for (i, name) in order.iter().enumerate() {
                assert_eq!(
                    lock.closed.get(i).map(|(span, _)| span),
                    Some(name),
                    "expected close order: {:?}, actual: {:?}",
                    order,
                    lock.closed.iter().map(|(name, _)| name).collect::<Vec<_>>()
                );
            }
        }
    }

    #[test]
    fn spans_are_removed_from_registry() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span = tracing::debug_span!("span1");
            drop(span);
            let span = tracing::info_span!("span2");
            drop(span);
        });

        state.assert_removed("span1");
        state.assert_removed("span2");

        // Ensure the registry itself outlives the span.
        drop(dispatch);
    }

    #[test]
    fn spans_are_only_closed_when_the_last_ref_drops() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        let span2 = dispatcher::with_default(&dispatch, || {
            let span = tracing::debug_span!("span1");
            drop(span);
            let span2 = tracing::info_span!("span2");
            let span2_clone = span2.clone();
            drop(span2);
            span2_clone
        });

        state.assert_removed("span1");
        state.assert_not_removed("span2");

        drop(span2);
        state.assert_removed("span1");

        // Ensure the registry itself outlives the span.
        drop(dispatch);
    }

    #[test]
    fn span_enter_guards_are_dropped_out_of_order() {
        let (close_layer, state) = CloseLayer::new();
        let subscriber = AssertionLayer
            .and_then(close_layer)
            .with_subscriber(Registry::default());

        // Create a `Dispatch` (which is internally reference counted) so that
        // the subscriber lives to the end of the test. Otherwise, if we just
        // passed the subscriber itself to `with_default`, we could see the span
        // be dropped when the subscriber itself is dropped, destroying the
        // registry.
        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::debug_span!("span1");
            let span2 = tracing::info_span!("span2");

            let enter1 = span1.enter();
            let enter2 = span2.enter();

            drop(enter1);
            drop(span1);

            state.assert_removed("span1");
            state.assert_not_removed("span2");

            drop(enter2);
            state.assert_not_removed("span2");

            drop(span2);
            state.assert_removed("span1");
            state.assert_removed("span2");
        });
    }

    #[test]
    fn child_closes_parent() {
        // This test asserts that if a parent span's handle is dropped before
        // a child span's handle, the parent will remain open until the child
        // closes, and will then be closed.

        let (close_layer, state) = CloseLayer::new();
        let subscriber = close_layer.with_subscriber(Registry::default());

        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::info_span!("parent");
            let span2 = tracing::info_span!(parent: &span1, "child");

            state.assert_open("parent");
            state.assert_open("child");

            drop(span1);
            state.assert_open("parent");
            state.assert_open("child");

            drop(span2);
            state.assert_closed("parent");
            state.assert_closed("child");
        });
    }

    #[test]
    fn child_closes_grandparent() {
        // This test asserts that, when a span is kept open by a child which
        // is *itself* kept open by a child, closing the grandchild will close
        // both the parent *and* the grandparent.
        let (close_layer, state) = CloseLayer::new();
        let subscriber = close_layer.with_subscriber(Registry::default());

        let dispatch = dispatcher::Dispatch::new(subscriber);

        dispatcher::with_default(&dispatch, || {
            let span1 = tracing::info_span!("grandparent");
            let span2 = tracing::info_span!(parent: &span1, "parent");
            let span3 = tracing::info_span!(parent: &span2, "child");

            state.assert_open("grandparent");
            state.assert_open("parent");
            state.assert_open("child");

            drop(span1);
            drop(span2);
            state.assert_open("grandparent");
            state.assert_open("parent");
            state.assert_open("child");

            drop(span3);

            state.assert_closed_in_order(["child", "parent", "grandparent"]);
        });
    }
}