use crate::POINTER_WIDTH;
9use std::cell::Cell;
10use std::cmp::Reverse;
11use std::collections::BinaryHeap;
12use std::sync::Mutex;
13
/// Allocator for unique thread IDs that recycles the IDs of exited threads.
///
/// Fresh IDs are minted in increasing order from `free_from`; IDs returned
/// via `free` go into a min-heap so that the smallest available ID is always
/// handed out first, keeping live threads packed into the low IDs.
struct ThreadIdManager {
    /// The next never-before-used ID.
    free_from: usize,
    /// Previously released IDs, smallest-first thanks to `Reverse`.
    /// Lazily created so `new` can stay a `const fn`.
    free_list: Option<BinaryHeap<Reverse<usize>>>,
}

impl ThreadIdManager {
    /// `const` constructor, so the manager can be stored in a `static`.
    const fn new() -> Self {
        Self {
            free_from: 0,
            free_list: None,
        }
    }

    /// Hands out the smallest available thread ID.
    fn alloc(&mut self) -> usize {
        match self.free_list.as_mut().and_then(BinaryHeap::pop) {
            // Prefer recycling the smallest previously released ID.
            Some(Reverse(recycled)) => recycled,
            // Otherwise mint a fresh one. NOTE(review): the increment is
            // unchecked; presumably a process can never hold anywhere near
            // `usize::MAX` live thread IDs at once, so overflow is
            // unreachable in practice — confirm.
            None => {
                let fresh = self.free_from;
                self.free_from += 1;
                fresh
            }
        }
    }

    /// Returns an ID to the pool, creating the heap on first use.
    fn free(&mut self, id: usize) {
        let heap = self.free_list.get_or_insert_with(BinaryHeap::new);
        heap.push(Reverse(id));
    }
}
49
/// Process-wide thread-ID allocator shared by all threads.
/// `Mutex::new` and `ThreadIdManager::new` are both `const`, so no lazy
/// initialization is needed.
static THREAD_ID_MANAGER: Mutex<ThreadIdManager> = Mutex::new(ThreadIdManager::new());
51
/// Per-thread data cached in thread-local storage: the thread's ID plus the
/// precomputed location of its slot in the bucketed storage scheme.
/// `Copy` so it can be cheaply read out of a `Cell`/`static mut` cache.
#[derive(Clone, Copy)]
pub(crate) struct Thread {
    // The unique ID allocated for this thread by `THREAD_ID_MANAGER`.
    pub(crate) id: usize,
    // Which bucket this thread's slot lives in (derived from `id` in `new`).
    pub(crate) bucket: usize,
    // Number of slots in that bucket: `1 << bucket`.
    pub(crate) bucket_size: usize,
    // Offset of this thread's slot within the bucket.
    pub(crate) index: usize,
}
65impl Thread {
66 pub(crate) fn new(id: usize) -> Self {
67 let bucket = usize::from(POINTER_WIDTH) - ((id + 1).leading_zeros() as usize) - 1;
68 let bucket_size = 1 << bucket;
69 let index = id - (bucket_size - 1);
70
71 Self {
72 id,
73 bucket,
74 bucket_size,
75 index,
76 }
77 }
78}
79
cfg_if::cfg_if! {
    if #[cfg(feature = "nightly")] {
        // Fast path on nightly: a plain `#[thread_local]` static avoids the
        // lazy-init machinery of the `thread_local!` macro on reads.
        // `None` means this thread has not been assigned a `Thread` yet.
        #[thread_local]
        static mut THREAD: Option<Thread> = None;
        // Separate guard in a regular `thread_local!` so its `Drop` runs at
        // thread exit and returns the ID to the manager.
        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }

        // Holds the ID to release on thread exit. `Cell` because the ID is
        // filled in by `get_slow` after the guard is created.
        struct ThreadGuard {
            id: Cell<usize>,
        }

        impl Drop for ThreadGuard {
            fn drop(&mut self) {
                unsafe {
                    // Clear the cached `Thread` BEFORE releasing the ID, so
                    // any later access on this thread goes through
                    // `get_slow` rather than observing a stale/reused ID.
                    THREAD = None;
                }
                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
            }
        }

        /// Returns this thread's `Thread`, allocating one on first use.
        #[inline]
        pub(crate) fn get() -> Thread {
            if let Some(thread) = unsafe { THREAD } {
                thread
            } else {
                get_slow()
            }
        }

        // Cold slow path: allocate an ID, cache it, and arm the exit guard.
        #[cold]
        fn get_slow() -> Thread {
            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
            unsafe {
                THREAD = Some(new);
            }
            // Record the ID in the guard so its `Drop` can release it.
            THREAD_GUARD.with(|guard| guard.id.set(new.id));
            new
        }
    } else {
        // Stable fallback: cache the `Thread` in a `thread_local!` `Cell`.
        thread_local! { static THREAD: Cell<Option<Thread>> = const { Cell::new(None) }; }
        // Separate guard whose `Drop` returns the ID at thread exit.
        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }

        // Holds the ID to release on thread exit. `Cell` because the ID is
        // filled in by `get_slow` after the guard is created.
        struct ThreadGuard {
            id: Cell<usize>,
        }

        impl Drop for ThreadGuard {
            fn drop(&mut self) {
                // Best effort: during TLS teardown the `THREAD` slot may
                // already have been destroyed, so `try_with` is used and its
                // error deliberately ignored. The cache is cleared before
                // the ID is released (same ordering as the nightly path).
                let _ = THREAD.try_with(|thread| thread.set(None));
                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
            }
        }

        /// Returns this thread's `Thread`, allocating one on first use.
        #[inline]
        pub(crate) fn get() -> Thread {
            THREAD.with(|thread| {
                if let Some(thread) = thread.get() {
                    thread
                } else {
                    get_slow(thread)
                }
            })
        }

        // Cold slow path: allocate an ID, cache it, and arm the exit guard.
        #[cold]
        fn get_slow(thread: &Cell<Option<Thread>>) -> Thread {
            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
            thread.set(Some(new));
            // Record the ID in the guard so its `Drop` can release it.
            THREAD_GUARD.with(|guard| guard.id.set(new.id));
            new
        }
    }
}
178
/// Checks `Thread::new`'s bucket math against hand-computed expectations.
#[test]
fn test_thread() {
    // (id, expected bucket, expected bucket_size, expected index)
    let cases: [(usize, usize, usize, usize); 5] = [
        (0, 0, 1, 0),
        (1, 1, 2, 0),
        (2, 1, 2, 1),
        (3, 2, 4, 0),
        (19, 4, 16, 4),
    ];

    for (id, bucket, bucket_size, index) in cases {
        let thread = Thread::new(id);
        assert_eq!(thread.id, id);
        assert_eq!(thread.bucket, bucket);
        assert_eq!(thread.bucket_size, bucket_size);
        assert_eq!(thread.index, index);
    }
}