bitvec/
index.rs

1#![doc = include_str!("../doc/index.md")]
2
3use core::{
4	any,
5	fmt::{
6		self,
7		Binary,
8		Debug,
9		Display,
10		Formatter,
11	},
12	iter::{
13		FusedIterator,
14		Sum,
15	},
16	marker::PhantomData,
17	ops::{
18		BitAnd,
19		BitOr,
20		Not,
21	},
22};
23
24use crate::{
25	mem::{
26		bits_of,
27		BitRegister,
28	},
29	order::BitOrder,
30};
31
#[repr(transparent)]
#[doc = include_str!("../doc/index/BitIdx.md")]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitIdx<R = usize>
where R: BitRegister
{
	/// Semantic index counter within a register, constrained to `0 .. R::BITS`.
	///
	/// The range constraint is maintained by the constructors (`new`,
	/// `new_unchecked`), not by the type system: the field is a plain `u8`.
	idx: u8,
	/// Marker for the register type. `PhantomData` records the `R` parameter
	/// without storing a value, so `repr(transparent)` keeps this type
	/// layout-identical to `u8`.
	_ty: PhantomData<R>,
}
43
impl<R> BitIdx<R>
where R: BitRegister
{
	/// The inclusive maximum index within an `R` element (`R::BITS - 1`).
	pub const MAX: Self = Self {
		idx: R::MASK,
		_ty: PhantomData,
	};
	/// The inclusive minimum index within an `R` element.
	pub const MIN: Self = Self {
		idx: 0,
		_ty: PhantomData,
	};

	/// Wraps a counter value as a known-good index into an `R` register.
	///
	/// ## Parameters
	///
	/// - `idx`: The counter value to mark as an index. This must be in the
	///   range `0 .. R::BITS`.
	///
	/// ## Returns
	///
	/// This returns `idx`, either marked as a valid `BitIdx` or an invalid
	/// `BitIdxError` by whether it is within the valid range `0 .. R::BITS`.
	#[inline]
	pub fn new(idx: u8) -> Result<Self, BitIdxError<R>> {
		if idx >= bits_of::<R>() as u8 {
			return Err(BitIdxError::new(idx));
		}
		Ok(unsafe { Self::new_unchecked(idx) })
	}

	/// Wraps a counter value as an assumed-good index into an `R` register.
	///
	/// ## Parameters
	///
	/// - `idx`: The counter value to mark as an index. This must be in the
	///   range `0 .. R::BITS`.
	///
	/// ## Returns
	///
	/// This unconditionally marks `idx` as a valid bit-index.
	///
	/// ## Safety
	///
	/// If the `idx` value is outside the valid range, then the program is
	/// incorrect. Debug builds will panic; release builds do not inspect the
	/// value or specify a behavior.
	#[inline]
	pub unsafe fn new_unchecked(idx: u8) -> Self {
		debug_assert!(
			idx < bits_of::<R>() as u8,
			"Bit index {} cannot exceed type width {}",
			idx,
			bits_of::<R>(),
		);
		Self {
			idx,
			_ty: PhantomData,
		}
	}

	/// Removes the index wrapper, leaving the internal counter.
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn into_inner(self) -> u8 {
		self.idx
	}

	/// Increments an index counter, wrapping at the back edge of the register.
	///
	/// ## Parameters
	///
	/// - `self`: The index to increment.
	///
	/// ## Returns
	///
	/// - `.0`: The next index after `self`.
	/// - `.1`: Indicates whether the new index is in the next memory address.
	#[inline]
	pub fn next(self) -> (Self, bool) {
		//  `self.idx` is at most `R::BITS - 1 <= 63`, so this cannot overflow.
		let next = self.idx + 1;
		(
			//  Masking folds a successor equal to `R::BITS` back to zero and
			//  leaves every in-range successor untouched.
			unsafe { Self::new_unchecked(next & R::MASK) },
			//  Reaching the register width means the increment carried into
			//  the next element.
			next == bits_of::<R>() as u8,
		)
	}

	/// Decrements an index counter, wrapping at the front edge of the register.
	///
	/// ## Parameters
	///
	/// - `self`: The index to decrement.
	///
	/// ## Returns
	///
	/// - `.0`: The previous index before `self`.
	/// - `.1`: Indicates whether the new index is in the previous memory
	///   address.
	#[inline]
	pub fn prev(self) -> (Self, bool) {
		//  `0 - 1` wraps to `255`; the mask below folds that to `R::MASK`,
		//  the last index of the previous element.
		let prev = self.idx.wrapping_sub(1);
		(
			unsafe { Self::new_unchecked(prev & R::MASK) },
			self.idx == 0,
		)
	}

	/// Computes the bit position corresponding to `self` under some ordering.
	///
	/// This forwards to [`O::at::<R>`], which is the only public, safe,
	/// constructor for a position counter.
	///
	/// [`O::at::<R>`]: crate::order::BitOrder::at
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn position<O>(self) -> BitPos<R>
	where O: BitOrder {
		O::at::<R>(self)
	}

	/// Computes the bit selector corresponding to `self` under an ordering.
	///
	/// This forwards to [`O::select::<R>`], which is the only public, safe,
	/// constructor for a bit selector.
	///
	/// [`O::select::<R>`]: crate::order::BitOrder::select
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn select<O>(self) -> BitSel<R>
	where O: BitOrder {
		O::select::<R>(self)
	}

	/// Computes the bit selector for `self` as an accessor mask.
	///
	/// This is a type-cast over [`Self::select`].
	///
	/// [`Self::select`]: Self::select
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn mask<O>(self) -> BitMask<R>
	where O: BitOrder {
		self.select::<O>().mask()
	}

	/// Iterates over all indices between an inclusive start and exclusive end
	/// point.
	///
	/// Because implementation details of the range type family, including the
	/// [`RangeBounds`] trait, are not yet stable, and heterogeneous ranges are
	/// not supported, this must be an opaque iterator rather than a direct
	/// [`Range<BitIdx<R>>`].
	///
	/// # Parameters
	///
	/// - `self`: The inclusive low bound of the range. This will be the first
	///   index produced by the iterator.
	/// - `upto`: The exclusive high bound of the range. The iterator will halt
	///   before yielding an index of this value.
	///
	/// # Returns
	///
	/// An opaque iterator that is equivalent to the range `self .. upto`.
	///
	/// # Requirements
	///
	/// `self` must be no greater than `upto`.
	///
	/// [`RangeBounds`]: core::ops::RangeBounds
	/// [`Range<BitIdx<R>>`]: core::ops::Range
	#[inline]
	pub fn range(
		self,
		upto: BitEnd<R>,
	) -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		let (from, upto) = (self.into_inner(), upto.into_inner());
		debug_assert!(from <= upto, "Ranges must run from low to high");
		//  Every counter below `upto <= R::BITS` is a valid index, so the
		//  unchecked constructor is sound here.
		(from .. upto).map(|val| unsafe { Self::new_unchecked(val) })
	}

	/// Iterates over all possible index values.
	#[inline]
	pub fn range_all() -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		BitIdx::MIN.range(BitEnd::MAX)
	}

	/// Computes the jump distance for some number of bits away from a starting
	/// index.
	///
	/// This computes the number of elements by which to adjust a base pointer,
	/// and then the bit index of the destination bit in the new referent
	/// register element.
	///
	/// # Parameters
	///
	/// - `self`: An index within some element, from which the offset is
	///   computed.
	/// - `by`: The distance by which to jump. Negative values move lower in the
	///   index and element-pointer space; positive values move higher.
	///
	/// # Returns
	///
	/// - `.0`: The number of elements `R` by which to adjust a base pointer.
	///   This value can be passed directly into [`ptr::offset`].
	/// - `.1`: The index of the destination bit within the destination element.
	///
	/// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset
	pub(crate) fn offset(self, by: isize) -> (isize, Self) {
		/* Signed-add `self.idx` to the jump distance. This will almost
		 * certainly not wrap (as the crate imposes restrictions well below
		 * `isize::MAX`), but correctness never hurts. The resulting sum is a
		 * bit distance that is then broken into an element distance and final
		 * bit index.
		 */
		let far = by.wrapping_add(self.into_inner() as isize);

		//  The arithmetic shift rounds toward negative infinity (flooring
		//  division by `R::BITS`), and the mask yields the matching
		//  non-negative remainder, so `head` is always a valid index even for
		//  negative `far`.
		let (elts, head) = (far >> R::INDX, far as u8 & R::MASK);

		(elts, unsafe { Self::new_unchecked(head) })
	}

	/// Computes the span information for a region beginning at `self` for `len`
	/// bits.
	///
	/// The span information is the number of elements in the region that hold
	/// live bits, and the position of the tail marker after the live bits.
	///
	/// This forwards to [`BitEnd::span`], as the computation is identical for
	/// the two types. Beginning a span at any `Idx` is equivalent to beginning
	/// it at the tail of a previous span.
	///
	/// # Parameters
	///
	/// - `self`: The start bit of the span.
	/// - `len`: The number of bits in the span.
	///
	/// # Returns
	///
	/// - `.0`: The number of elements, starting in the element that contains
	///   `self`, that contain live bits of the span.
	/// - `.1`: The tail counter of the span’s end point.
	///
	/// [`BitEnd::span`]: crate::index::BitEnd::span
	pub(crate) fn span(self, len: usize) -> (usize, BitEnd<R>) {
		//  Any valid index (`< R::BITS`) is also a valid tail (`<= R::BITS`),
		//  so this conversion is sound.
		unsafe { BitEnd::<R>::new_unchecked(self.into_inner()) }.span(len)
	}
}
299
300impl<R> Binary for BitIdx<R>
301where R: BitRegister
302{
303	#[inline]
304	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
305		write!(fmt, "{:0>1$b}", self.idx, R::INDX as usize)
306	}
307}
308
309impl<R> Debug for BitIdx<R>
310where R: BitRegister
311{
312	#[inline]
313	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
314		write!(fmt, "BitIdx<{}>({})", any::type_name::<R>(), self)
315	}
316}
317
318impl<R> Display for BitIdx<R>
319where R: BitRegister
320{
321	#[inline]
322	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
323		Binary::fmt(self, fmt)
324	}
325}
326
#[repr(transparent)]
#[doc = include_str!("../doc/index/BitIdxError.md")]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitIdxError<R = usize>
where R: BitRegister
{
	/// The value that is invalid as a [`BitIdx<R>`].
	///
	/// Constructors maintain the invariant that this is `>= R::BITS`.
	///
	/// [`BitIdx<R>`]: crate::index::BitIdx
	err: u8,
	/// Marker for the register type. Carries `R` without storing a value.
	_ty: PhantomData<R>,
}
340
impl<R> BitIdxError<R>
where R: BitRegister
{
	/// Marks a counter value as invalid to be an index for an `R` register.
	///
	/// ## Parameters
	///
	/// - `err`: The counter value to mark as an error. This must be at least
	///   `R::BITS` (the assertion below rejects any value that would be a
	///   valid index).
	///
	/// ## Returns
	///
	/// This returns `err`, marked as an invalid index for `R`.
	///
	/// ## Panics
	///
	/// Debug builds panic when `err` is a valid index for `R`.
	pub(crate) fn new(err: u8) -> Self {
		debug_assert!(
			err >= bits_of::<R>() as u8,
			"Bit index {} is valid for type width {}",
			err,
			bits_of::<R>(),
		);
		Self {
			err,
			_ty: PhantomData,
		}
	}

	/// Removes the error wrapper, leaving the internal counter.
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn into_inner(self) -> u8 {
		self.err
	}
}
378
379impl<R> Debug for BitIdxError<R>
380where R: BitRegister
381{
382	#[inline]
383	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
384		write!(fmt, "BitIdxError<{}>({})", any::type_name::<R>(), self.err)
385	}
386}
387
388#[cfg(not(tarpaulin_include))]
389impl<R> Display for BitIdxError<R>
390where R: BitRegister
391{
392	#[inline]
393	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
394		write!(
395			fmt,
396			"the value {} is too large to index into {} ({} bits wide)",
397			self.err,
398			any::type_name::<R>(),
399			bits_of::<R>(),
400		)
401	}
402}
403
//  `BitIdxError` carries no source error, so the default trait methods
//  suffice and the impl body is empty.
//  NOTE(review): gated on the `std` feature — presumably the crate's minimum
//  supported Rust version predates a usable `core` error trait; confirm
//  before relocating this impl.
#[cfg(feature = "std")]
impl<R> std::error::Error for BitIdxError<R> where R: BitRegister {}
406
#[repr(transparent)]
#[doc = include_str!("../doc/index/BitEnd.md")]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitEnd<R = usize>
where R: BitRegister
{
	/// Semantic tail counter within or after a register, constrained to `0 ..=
	/// R::BITS`.
	///
	/// Unlike `BitIdx`, the value `R::BITS` itself is valid: it marks the
	/// first dead bit *after* a fully-occupied element.
	end: u8,
	/// Marker for the register type. Carries `R` without storing a value.
	_ty: PhantomData<R>,
}
419
impl<R> BitEnd<R>
where R: BitRegister
{
	/// The inclusive maximum tail within (or after) an `R` element
	/// (`R::BITS`, one past the last valid index).
	pub const MAX: Self = Self {
		end: bits_of::<R>() as u8,
		_ty: PhantomData,
	};
	/// The inclusive minimum tail within (or after) an `R` element.
	pub const MIN: Self = Self {
		end: 0,
		_ty: PhantomData,
	};

	/// Wraps a counter value as a known-good tail of an `R` register.
	///
	/// ## Parameters
	///
	/// - `end`: The counter value to mark as a tail. This must be in the range
	///   `0 ..= R::BITS`.
	///
	/// ## Returns
	///
	/// This returns `Some(end)` when it is in the valid range `0 ..= R::BITS`,
	/// and `None` when it is not.
	#[inline]
	pub fn new(end: u8) -> Option<Self> {
		if end > bits_of::<R>() as u8 {
			return None;
		}
		Some(unsafe { Self::new_unchecked(end) })
	}

	/// Wraps a counter value as an assumed-good tail of an `R` register.
	///
	/// ## Parameters
	///
	/// - `end`: The counter value to mark as a tail. This must be in the range
	///   `0 ..= R::BITS`.
	///
	/// ## Returns
	///
	/// This unconditionally marks `end` as a valid tail index.
	///
	/// ## Safety
	///
	/// If the `end` value is outside the valid range, then the program is
	/// incorrect. Debug builds will panic; release builds do not inspect the
	/// value or specify a behavior.
	pub(crate) unsafe fn new_unchecked(end: u8) -> Self {
		debug_assert!(
			end <= bits_of::<R>() as u8,
			"Bit tail {} cannot exceed type width {}",
			end,
			bits_of::<R>(),
		);
		Self {
			end,
			_ty: PhantomData,
		}
	}

	/// Removes the tail wrapper, leaving the internal counter.
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn into_inner(self) -> u8 {
		self.end
	}

	/// Iterates over all tail indices at and after an inclusive starting point.
	///
	/// Because implementation details of the range type family, including the
	/// [`RangeBounds`] trait, are not yet stable, and heterogeneous ranges are
	/// not yet supported, this must be an opaque iterator rather than a direct
	/// [`Range<BitEnd<R>>`].
	///
	/// # Parameters
	///
	/// - `from`: The inclusive low bound of the range. This will be the first
	///   tail produced by the iterator.
	///
	/// # Returns
	///
	/// An opaque iterator that is equivalent to the range `from ..=
	/// Self::MAX`.
	///
	/// [`RangeBounds`]: core::ops::RangeBounds
	/// [`Range<BitEnd<R>>`]: core::ops::Range
	#[inline]
	pub fn range_from(
		from: BitIdx<R>,
	) -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		//  Every counter in `from.idx ..= R::BITS` is a valid tail, so the
		//  unchecked constructor is sound.
		(from.idx ..= Self::MAX.end)
			.map(|tail| unsafe { BitEnd::new_unchecked(tail) })
	}

	/// Computes the span information for a region.
	///
	/// The computed span of `len` bits begins at `self` and extends upwards in
	/// memory. The return value is the number of memory elements that contain
	/// bits of the span, and the first dead bit after the span.
	///
	/// ## Parameters
	///
	/// - `self`: A dead bit which is used as the first live bit of the new
	///   span.
	/// - `len`: The number of live bits in the span starting at `self`.
	///
	/// ## Returns
	///
	/// - `.0`: The number of `R` elements that contain live bits in the
	///   computed span.
	/// - `.1`: The dead-bit tail index ending the computed span.
	///
	/// ## Behavior
	///
	/// If `len` is `0`, this returns `(0, self)`, as the span has no live bits.
	/// If `self` is [`BitEnd::MAX`], then the new region starts at
	/// [`BitIdx::MIN`] in the next element.
	///
	/// [`BitEnd::MAX`]: Self::MAX
	/// [`BitIdx::MIN`]: crate::index::BitIdx::MIN
	pub(crate) fn span(self, len: usize) -> (usize, Self) {
		if len == 0 {
			return (0, self);
		}

		//  Fold `R::BITS` to zero: a span that begins at the very end of one
		//  element actually begins at index 0 of the next.
		let head = self.end & R::MASK;
		let bits_in_head = (bits_of::<R>() as u8 - head) as usize;

		//  The span fits entirely inside the first element.
		if len <= bits_in_head {
			return (1, unsafe { Self::new_unchecked(head + len as u8) });
		}

		//  Split the remainder into whole elements and a partial tail.
		let bits_after_head = len - bits_in_head;
		let elts = bits_after_head >> R::INDX;
		let tail = bits_after_head as u8 & R::MASK;

		//  A zero tail means the span ends exactly on an element boundary:
		//  the tail marker becomes `R::BITS` and only one partial edge
		//  element (the head) is added to the count; otherwise both the head
		//  and tail edge elements are counted.
		let is_zero = (tail == 0) as u8;
		let edges = 2 - is_zero as usize;
		(elts + edges, unsafe {
			Self::new_unchecked((is_zero << R::INDX) | tail)
		})
	}
}
568
569impl<R> Binary for BitEnd<R>
570where R: BitRegister
571{
572	#[inline]
573	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
574		write!(fmt, "{:0>1$b}", self.end, R::INDX as usize + 1)
575	}
576}
577
578impl<R> Debug for BitEnd<R>
579where R: BitRegister
580{
581	#[inline]
582	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
583		write!(fmt, "BitEnd<{}>({})", any::type_name::<R>(), self)
584	}
585}
586
587impl<R> Display for BitEnd<R>
588where R: BitRegister
589{
590	#[inline]
591	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
592		Binary::fmt(self, fmt)
593	}
594}
595
#[repr(transparent)]
#[doc = include_str!("../doc/index/BitPos.md")]
// #[rustc_layout_scalar_valid_range_end(R::BITS)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitPos<R = usize>
where R: BitRegister
{
	/// Electrical position counter within a register, constrained to `0 ..
	/// R::BITS`.
	///
	/// This measures the actual shift amount in the register, as opposed to
	/// `BitIdx`, whose mapping to a shift amount depends on a `BitOrder`.
	pos: u8,
	/// Marker for the register type. Carries `R` without storing a value.
	_ty: PhantomData<R>,
}
609
impl<R> BitPos<R>
where R: BitRegister
{
	/// The position value of the most significant bit in an `R` element.
	pub const MAX: Self = Self {
		pos: R::MASK as u8,
		_ty: PhantomData,
	};
	/// The position value of the least significant bit in an `R` element.
	pub const MIN: Self = Self {
		pos: 0,
		_ty: PhantomData,
	};

	/// Wraps a counter value as a known-good position within an `R` register.
	///
	/// ## Parameters
	///
	/// - `pos`: The counter value to mark as a position. This must be in the
	///   range `0 .. R::BITS`.
	///
	/// ## Returns
	///
	/// This returns `Some(pos)` when it is in the valid range `0 .. R::BITS`,
	/// and `None` when it is not.
	#[inline]
	pub fn new(pos: u8) -> Option<Self> {
		if pos >= bits_of::<R>() as u8 {
			return None;
		}
		Some(unsafe { Self::new_unchecked(pos) })
	}

	/// Wraps a counter value as an assumed-good position within an `R`
	/// register.
	///
	/// ## Parameters
	///
	/// - `pos`: The counter value to mark as a position. This must be in the
	///   range `0 .. R::BITS`.
	///
	/// ## Returns
	///
	/// This unconditionally marks `pos` as a valid bit-position.
	///
	/// ## Safety
	///
	/// If the `pos` value is outside the valid range, then the program is
	/// incorrect. Debug builds will panic; release builds do not inspect the
	/// value or specify a behavior.
	#[inline]
	pub unsafe fn new_unchecked(pos: u8) -> Self {
		debug_assert!(
			pos < bits_of::<R>() as u8,
			"Bit position {} cannot exceed type width {}",
			pos,
			bits_of::<R>(),
		);
		Self {
			pos,
			_ty: PhantomData,
		}
	}

	/// Removes the position wrapper, leaving the internal counter.
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn into_inner(self) -> u8 {
		self.pos
	}

	/// Computes the bit selector corresponding to `self`.
	///
	/// This is always `1 << self.pos`.
	#[inline]
	pub fn select(self) -> BitSel<R> {
		//  A shift by a valid position always produces exactly one set bit,
		//  satisfying `BitSel`'s one-hot requirement.
		unsafe { BitSel::new_unchecked(R::ONE << self.pos) }
	}

	/// Computes the bit selector for `self` as an accessor mask.
	///
	/// This is a type-cast over [`Self::select`].
	///
	/// [`Self::select`]: Self::select
	#[inline]
	#[cfg(not(tarpaulin_include))]
	pub fn mask(self) -> BitMask<R> {
		self.select().mask()
	}

	/// Iterates over all possible position values.
	pub(crate) fn range_all() -> impl Iterator<Item = Self>
	+ DoubleEndedIterator
	+ ExactSizeIterator
	+ FusedIterator {
		//  Index and position counters share the identical value range
		//  `0 .. R::BITS`, so each index maps directly to a position.
		BitIdx::<R>::range_all()
			.map(|idx| unsafe { Self::new_unchecked(idx.into_inner()) })
	}
}
709
710impl<R> Binary for BitPos<R>
711where R: BitRegister
712{
713	#[inline]
714	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
715		write!(fmt, "{:0>1$b}", self.pos, R::INDX as usize)
716	}
717}
718
719impl<R> Debug for BitPos<R>
720where R: BitRegister
721{
722	#[inline]
723	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
724		write!(fmt, "BitPos<{}>({})", any::type_name::<R>(), self)
725	}
726}
727
728impl<R> Display for BitPos<R>
729where R: BitRegister
730{
731	#[inline]
732	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
733		Binary::fmt(self, fmt)
734	}
735}
736
#[repr(transparent)]
#[doc = include_str!("../doc/index/BitSel.md")]
// #[rustc_layout_scalar_valid_range_end(R::BITS)]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitSel<R = usize>
where R: BitRegister
{
	/// A one-hot selection mask.
	///
	/// Constructors maintain the invariant that exactly one bit is set.
	sel: R,
}
747
748impl<R> BitSel<R>
749where R: BitRegister
750{
751	/// Wraps a selector value as a known-good selection in an `R` register.
752	///
753	/// ## Parameters
754	///
755	/// - `sel`: A one-hot selection mask of a bit in an `R` register.
756	///
757	/// ## Returns
758	///
759	/// This returns `Some(sel)` when it is a power of two (exactly one bit set
760	/// and all others cleared), and `None` when it is not.
761	#[inline]
762	pub fn new(sel: R) -> Option<Self> {
763		if sel.count_ones() != 1 {
764			return None;
765		}
766		Some(unsafe { Self::new_unchecked(sel) })
767	}
768
769	/// Wraps a selector value as an assumed-good selection in an `R` register.
770	///
771	/// ## Parameters
772	///
773	/// - `sel`: A one-hot selection mask of a bit in an `R` register.
774	///
775	/// ## Returns
776	///
777	/// This unconditionally marks `sel` as a one-hot bit selector.
778	///
779	/// ## Safety
780	///
781	/// If the `sel` value has zero or multiple bits set, then it is invalid to
782	/// be used as a `BitSel` and the program is incorrect. Debug builds will
783	/// panic; release builds do not inspect the value or specify a behavior.
784	#[inline]
785	pub unsafe fn new_unchecked(sel: R) -> Self {
786		debug_assert!(
787			sel.count_ones() == 1,
788			"Selections are required to have exactly one bit set: {:0>1$b}",
789			sel,
790			bits_of::<R>() as usize,
791		);
792		Self { sel }
793	}
794
795	/// Removes the one-hot selection wrapper, leaving the internal mask.
796	#[inline]
797	#[cfg(not(tarpaulin_include))]
798	pub fn into_inner(self) -> R {
799		self.sel
800	}
801
802	/// Computes a bit-mask for `self`. This is a type-cast.
803	#[inline]
804	#[cfg(not(tarpaulin_include))]
805	pub fn mask(self) -> BitMask<R> {
806		BitMask::new(self.sel)
807	}
808
809	/// Iterates over all possible selector values.
810	#[inline]
811	pub fn range_all() -> impl Iterator<Item = Self>
812	+ DoubleEndedIterator
813	+ ExactSizeIterator
814	+ FusedIterator {
815		BitPos::<R>::range_all().map(BitPos::select)
816	}
817}
818
819impl<R> Binary for BitSel<R>
820where R: BitRegister
821{
822	#[inline]
823	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
824		write!(fmt, "{:0>1$b}", self.sel, bits_of::<R>() as usize)
825	}
826}
827
828impl<R> Debug for BitSel<R>
829where R: BitRegister
830{
831	#[inline]
832	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
833		write!(fmt, "BitSel<{}>({})", any::type_name::<R>(), self)
834	}
835}
836
837impl<R> Display for BitSel<R>
838where R: BitRegister
839{
840	#[inline]
841	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
842		Binary::fmt(self, fmt)
843	}
844}
845
#[repr(transparent)]
#[doc = include_str!("../doc/index/BitMask.md")]
#[derive(Clone, Copy, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitMask<R = usize>
where R: BitRegister
{
	/// A mask of any number of bits to select.
	///
	/// Unlike `BitSel`, no one-hot invariant applies: any bit pattern is a
	/// valid mask.
	mask: R,
}
855
856impl<R> BitMask<R>
857where R: BitRegister
858{
859	/// A full bit-mask with every bit set.
860	pub const ALL: Self = Self { mask: R::ALL };
861	/// An empty bit-mask with every bit cleared.
862	pub const ZERO: Self = Self { mask: R::ZERO };
863
864	/// Wraps any `R` value as a bit-mask.
865	///
866	/// This constructor is provided to explicitly declare that an operation is
867	/// discarding the numeric value of an integer and instead using it only as
868	/// a bit-mask.
869	///
870	/// ## Parameters
871	///
872	/// - `mask`: Some integer to use as a bit-mask.
873	///
874	/// ## Returns
875	///
876	/// The `mask` value wrapped as a bit-mask, with its numeric context
877	/// discarded.
878	///
879	/// Prefer accumulating [`BitSel`] values using its `Sum` implementation.
880	///
881	/// ## Safety
882	///
883	/// The `mask` value must be computed from a set of valid bit positions in
884	/// the caller’s context.
885	///
886	/// [`BitSel`]: crate::index::BitSel
887	#[inline]
888	pub fn new(mask: R) -> Self {
889		Self { mask }
890	}
891
892	/// Removes the mask wrapper, leaving the internal value.
893	#[inline]
894	#[cfg(not(tarpaulin_include))]
895	pub fn into_inner(self) -> R {
896		self.mask
897	}
898
899	/// Tests if a mask contains a given selector bit.
900	///
901	/// ## Parameters
902	///
903	/// - `&self`: The mask being tested.
904	/// - `sel`: A selector bit to test in `self`.
905	///
906	/// ## Returns
907	///
908	/// Whether `self` has set the bit that `sel` indicates.
909	#[inline]
910	pub fn test(&self, sel: BitSel<R>) -> bool {
911		self.mask & sel.sel != R::ZERO
912	}
913
914	/// Inserts a selector bit into a mask.
915	///
916	/// ## Parameters
917	///
918	/// - `&mut self`: The mask being modified.
919	/// - `sel`: A selector bit to insert into `self`.
920	///
921	/// ## Effects
922	///
923	/// The `sel` bit is set in the mask.
924	#[inline]
925	pub fn insert(&mut self, sel: BitSel<R>) {
926		self.mask |= sel.sel;
927	}
928
929	/// Creates a new mask with a selector bit activated.
930	///
931	/// ## Parameters
932	///
933	/// - `self`: The original mask.
934	/// - `sel`: The selector bit being added into the mask.
935	///
936	/// ## Returns
937	///
938	/// A new bit-mask with `sel` activated.
939	#[inline]
940	pub fn combine(self, sel: BitSel<R>) -> Self {
941		Self {
942			mask: self.mask | sel.sel,
943		}
944	}
945}
946
947impl<R> Binary for BitMask<R>
948where R: BitRegister
949{
950	#[inline]
951	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
952		write!(fmt, "{:0>1$b}", self.mask, bits_of::<R>() as usize)
953	}
954}
955
956impl<R> Debug for BitMask<R>
957where R: BitRegister
958{
959	#[inline]
960	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
961		write!(fmt, "BitMask<{}>({})", any::type_name::<R>(), self)
962	}
963}
964
965impl<R> Display for BitMask<R>
966where R: BitRegister
967{
968	#[inline]
969	fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
970		Binary::fmt(self, fmt)
971	}
972}
973
974impl<R> Sum<BitSel<R>> for BitMask<R>
975where R: BitRegister
976{
977	#[inline]
978	fn sum<I>(iter: I) -> Self
979	where I: Iterator<Item = BitSel<R>> {
980		iter.fold(Self::ZERO, Self::combine)
981	}
982}
983
984impl<R> BitAnd<R> for BitMask<R>
985where R: BitRegister
986{
987	type Output = Self;
988
989	#[inline]
990	fn bitand(self, rhs: R) -> Self::Output {
991		Self {
992			mask: self.mask & rhs,
993		}
994	}
995}
996
997impl<R> BitOr<R> for BitMask<R>
998where R: BitRegister
999{
1000	type Output = Self;
1001
1002	#[inline]
1003	fn bitor(self, rhs: R) -> Self::Output {
1004		Self {
1005			mask: self.mask | rhs,
1006		}
1007	}
1008}
1009
1010impl<R> Not for BitMask<R>
1011where R: BitRegister
1012{
1013	type Output = Self;
1014
1015	#[inline]
1016	fn not(self) -> Self::Output {
1017		Self { mask: !self.mask }
1018	}
1019}
1020
1021#[cfg(test)]
1022mod tests {
1023	use super::*;
1024	use crate::order::Lsb0;
1025
1026	#[test]
1027	fn index_ctors() {
1028		for n in 0 .. 8 {
1029			assert!(BitIdx::<u8>::new(n).is_ok());
1030		}
1031		assert!(BitIdx::<u8>::new(8).is_err());
1032
1033		for n in 0 .. 16 {
1034			assert!(BitIdx::<u16>::new(n).is_ok());
1035		}
1036		assert!(BitIdx::<u16>::new(16).is_err());
1037
1038		for n in 0 .. 32 {
1039			assert!(BitIdx::<u32>::new(n).is_ok());
1040		}
1041		assert!(BitIdx::<u32>::new(32).is_err());
1042
1043		#[cfg(target_pointer_width = "64")]
1044		{
1045			for n in 0 .. 64 {
1046				assert!(BitIdx::<u64>::new(n).is_ok());
1047			}
1048			assert!(BitIdx::<u64>::new(64).is_err());
1049		}
1050
1051		for n in 0 .. bits_of::<usize>() as u8 {
1052			assert!(BitIdx::<usize>::new(n).is_ok());
1053		}
1054		assert!(BitIdx::<usize>::new(bits_of::<usize>() as u8).is_err());
1055	}
1056
1057	#[test]
1058	fn tail_ctors() {
1059		for n in 0 ..= 8 {
1060			assert!(BitEnd::<u8>::new(n).is_some());
1061		}
1062		assert!(BitEnd::<u8>::new(9).is_none());
1063
1064		for n in 0 ..= 16 {
1065			assert!(BitEnd::<u16>::new(n).is_some());
1066		}
1067		assert!(BitEnd::<u16>::new(17).is_none());
1068
1069		for n in 0 ..= 32 {
1070			assert!(BitEnd::<u32>::new(n).is_some());
1071		}
1072		assert!(BitEnd::<u32>::new(33).is_none());
1073
1074		#[cfg(target_pointer_width = "64")]
1075		{
1076			for n in 0 ..= 64 {
1077				assert!(BitEnd::<u64>::new(n).is_some());
1078			}
1079			assert!(BitEnd::<u64>::new(65).is_none());
1080		}
1081
1082		for n in 0 ..= bits_of::<usize>() as u8 {
1083			assert!(BitEnd::<usize>::new(n).is_some());
1084		}
1085		assert!(BitEnd::<usize>::new(bits_of::<usize>() as u8 + 1).is_none());
1086	}
1087
1088	#[test]
1089	fn position_ctors() {
1090		for n in 0 .. 8 {
1091			assert!(BitPos::<u8>::new(n).is_some());
1092		}
1093		assert!(BitPos::<u8>::new(8).is_none());
1094
1095		for n in 0 .. 16 {
1096			assert!(BitPos::<u16>::new(n).is_some());
1097		}
1098		assert!(BitPos::<u16>::new(16).is_none());
1099
1100		for n in 0 .. 32 {
1101			assert!(BitPos::<u32>::new(n).is_some());
1102		}
1103		assert!(BitPos::<u32>::new(32).is_none());
1104
1105		#[cfg(target_pointer_width = "64")]
1106		{
1107			for n in 0 .. 64 {
1108				assert!(BitPos::<u64>::new(n).is_some());
1109			}
1110			assert!(BitPos::<u64>::new(64).is_none());
1111		}
1112
1113		for n in 0 .. bits_of::<usize>() as u8 {
1114			assert!(BitPos::<usize>::new(n).is_some());
1115		}
1116		assert!(BitPos::<usize>::new(bits_of::<usize>() as u8).is_none());
1117	}
1118
1119	#[test]
1120	fn select_ctors() {
1121		for n in 0 .. 8 {
1122			assert!(BitSel::<u8>::new(1 << n).is_some());
1123		}
1124		assert!(BitSel::<u8>::new(0).is_none());
1125		assert!(BitSel::<u8>::new(3).is_none());
1126
1127		for n in 0 .. 16 {
1128			assert!(BitSel::<u16>::new(1 << n).is_some());
1129		}
1130		assert!(BitSel::<u16>::new(0).is_none());
1131		assert!(BitSel::<u16>::new(3).is_none());
1132
1133		for n in 0 .. 32 {
1134			assert!(BitSel::<u32>::new(1 << n).is_some());
1135		}
1136		assert!(BitSel::<u32>::new(0).is_none());
1137		assert!(BitSel::<u32>::new(3).is_none());
1138
1139		#[cfg(target_pointer_width = "64")]
1140		{
1141			for n in 0 .. 64 {
1142				assert!(BitSel::<u64>::new(1 << n).is_some());
1143			}
1144			assert!(BitSel::<u64>::new(0).is_none());
1145			assert!(BitSel::<u64>::new(3).is_none());
1146		}
1147
1148		for n in 0 .. bits_of::<usize>() as u8 {
1149			assert!(BitSel::<usize>::new(1 << n).is_some());
1150		}
1151		assert!(BitSel::<usize>::new(0).is_none());
1152		assert!(BitSel::<usize>::new(3).is_none());
1153	}
1154
1155	#[test]
1156	fn ranges() {
1157		let mut range = BitIdx::<u16>::range_all();
1158		assert_eq!(range.next(), BitIdx::new(0).ok());
1159		assert_eq!(range.next_back(), BitIdx::new(15).ok());
1160		assert_eq!(range.count(), 14);
1161
1162		let mut range = BitEnd::<u8>::range_from(BitIdx::new(1).unwrap());
1163		assert_eq!(range.next(), BitEnd::new(1));
1164		assert_eq!(range.next_back(), BitEnd::new(8));
1165		assert_eq!(range.count(), 6);
1166
1167		let mut range = BitPos::<u8>::range_all();
1168		assert_eq!(range.next(), BitPos::new(0));
1169		assert_eq!(range.next_back(), BitPos::new(7));
1170		assert_eq!(range.count(), 6);
1171
1172		let mut range = BitSel::<u8>::range_all();
1173		assert_eq!(range.next(), BitSel::new(1));
1174		assert_eq!(range.next_back(), BitSel::new(128));
1175		assert_eq!(range.count(), 6);
1176	}
1177
1178	#[test]
1179	fn index_cycle() {
1180		let six = BitIdx::<u8>::new(6).unwrap();
1181		let (seven, step) = six.next();
1182		assert_eq!(seven, BitIdx::new(7).unwrap());
1183		assert!(!step);
1184
1185		let (zero, step) = seven.next();
1186		assert_eq!(zero, BitIdx::MIN);
1187		assert!(step);
1188
1189		let (seven, step) = zero.prev();
1190		assert_eq!(seven, BitIdx::new(7).unwrap());
1191		assert!(step);
1192
1193		let (six, step) = seven.prev();
1194		assert_eq!(six, BitIdx::new(6).unwrap());
1195		assert!(!step);
1196
1197		let fourteen = BitIdx::<u16>::new(14).unwrap();
1198		let (fifteen, step) = fourteen.next();
1199		assert_eq!(fifteen, BitIdx::new(15).unwrap());
1200		assert!(!step);
1201		let (zero, step) = fifteen.next();
1202		assert_eq!(zero, BitIdx::MIN);
1203		assert!(step);
1204		let (fifteen, step) = zero.prev();
1205		assert_eq!(fifteen, BitIdx::new(15).unwrap());
1206		assert!(step);
1207		let (fourteen, step) = fifteen.prev();
1208		assert_eq!(fourteen, BitIdx::new(14).unwrap());
1209		assert!(!step);
1210	}
1211
	#[test]
	fn jumps() {
		//  A small offset stays in the same element: no element jump.
		let (jump, head) = BitIdx::<u8>::new(1).unwrap().offset(2);
		assert_eq!(jump, 0);
		assert_eq!(head, BitIdx::new(3).unwrap());

		//  Stepping off the top index advances one element, landing on 0.
		let (jump, head) = BitIdx::<u8>::MAX.offset(1);
		assert_eq!(jump, 1);
		assert_eq!(head, BitIdx::MIN);

		let (jump, head) = BitIdx::<u16>::new(10).unwrap().offset(40);
		// 10 is in 0..16; 10+40 is in 48..64
		assert_eq!(jump, 3);
		assert_eq!(head, BitIdx::new(2).unwrap());

		//  .offset() wraps at the `isize` boundary
		//  NOTE(review): starting from index 7, a jump of `isize::MAX` pushes
		//  the total bit distance past `isize::MAX`, so the element count
		//  appears to wrap negative — confirm against `offset`'s contract.
		let (jump, head) = BitIdx::<u8>::MAX.offset(isize::MAX);
		assert_eq!(jump, -(((isize::MAX as usize + 1) >> 3) as isize));
		assert_eq!(head, BitIdx::MAX.prev().0);

		//  span() reports (elements touched, one-past-the-end tail index).
		//  An empty span touches nothing and ends where it starts.
		let (elts, tail) = BitIdx::<u8>::new(4).unwrap().span(0);
		assert_eq!(elts, 0);
		assert_eq!(tail, BitEnd::new(4).unwrap());

		//  Three bits starting at index 3 fit in one element, ending at 6.
		let (elts, tail) = BitIdx::<u8>::new(3).unwrap().span(3);
		assert_eq!(elts, 1);
		assert_eq!(tail, BitEnd::new(6).unwrap());

		//  40 bits starting at index 10 of a u16 run to absolute bit 50,
		//  which is bit 2 of the fourth element.
		let (elts, tail) = BitIdx::<u16>::new(10).unwrap().span(40);
		assert_eq!(elts, 4);
		assert_eq!(tail, BitEnd::new(2).unwrap());
	}
1244
1245	#[test]
1246	fn mask_operators() {
1247		let mut mask = BitIdx::<u8>::new(2)
1248			.unwrap()
1249			.range(BitEnd::new(5).unwrap())
1250			.map(BitIdx::select::<Lsb0>)
1251			.sum::<BitMask<u8>>();
1252		assert_eq!(mask, BitMask::new(28));
1253		assert_eq!(mask & 25, BitMask::new(24));
1254		assert_eq!(mask | 32, BitMask::new(60));
1255		assert_eq!(!mask, BitMask::new(!28));
1256		let yes = BitSel::<u8>::new(16).unwrap();
1257		let no = BitSel::<u8>::new(64).unwrap();
1258		assert!(mask.test(yes));
1259		assert!(!mask.test(no));
1260		mask.insert(no);
1261		assert!(mask.test(no));
1262	}
1263
	#[test]
	#[cfg(feature = "alloc")]
	fn render() {
		//  `format!` lives in `alloc` when `std` is unavailable.
		#[cfg(not(feature = "std"))]
		use alloc::format;

		//  `BitIdx` renders its counter in unpadded binary: MAX is R::BITS-1.
		assert_eq!(format!("{:?}", BitIdx::<u8>::MAX), "BitIdx<u8>(111)");
		assert_eq!(format!("{:?}", BitIdx::<u16>::MAX), "BitIdx<u16>(1111)");
		assert_eq!(format!("{:?}", BitIdx::<u32>::MAX), "BitIdx<u32>(11111)");

		//  The error type renders the rejected counter in decimal.
		assert_eq!(
			format!("{:?}", BitIdx::<u8>::new(8).unwrap_err()),
			"BitIdxError<u8>(8)"
		);
		assert_eq!(
			format!("{:?}", BitIdx::<u16>::new(16).unwrap_err()),
			"BitIdxError<u16>(16)"
		);
		assert_eq!(
			format!("{:?}", BitIdx::<u32>::new(32).unwrap_err()),
			"BitIdxError<u32>(32)"
		);

		//  `BitEnd::MAX` is R::BITS itself, hence one more binary digit.
		assert_eq!(format!("{:?}", BitEnd::<u8>::MAX), "BitEnd<u8>(1000)");
		assert_eq!(format!("{:?}", BitEnd::<u16>::MAX), "BitEnd<u16>(10000)");
		assert_eq!(format!("{:?}", BitEnd::<u32>::MAX), "BitEnd<u32>(100000)");

		assert_eq!(format!("{:?}", BitPos::<u8>::MAX), "BitPos<u8>(111)");
		assert_eq!(format!("{:?}", BitPos::<u16>::MAX), "BitPos<u16>(1111)");
		assert_eq!(format!("{:?}", BitPos::<u32>::MAX), "BitPos<u32>(11111)");

		//  Selectors render as a full-width binary image of the register.
		assert_eq!(
			format!("{:?}", BitSel::<u8>::new(1).unwrap()),
			"BitSel<u8>(00000001)",
		);
		assert_eq!(
			format!("{:?}", BitSel::<u16>::new(1).unwrap()),
			"BitSel<u16>(0000000000000001)",
		);
		assert_eq!(
			format!("{:?}", BitSel::<u32>::new(1).unwrap()),
			"BitSel<u32>(00000000000000000000000000000001)",
		);

		//  Masks render likewise: bits 0, 2, and 5 are set here.
		assert_eq!(
			format!("{:?}", BitMask::<u8>::new(1 | 4 | 32)),
			"BitMask<u8>(00100101)",
		);
		assert_eq!(
			format!("{:?}", BitMask::<u16>::new(1 | 4 | 32)),
			"BitMask<u16>(0000000000100101)",
		);
		assert_eq!(
			format!("{:?}", BitMask::<u32>::new(1 | 4 | 32)),
			"BitMask<u32>(00000000000000000000000000100101)",
		);

		//  `u64` is a `BitRegister` only on 64-bit targets.
		#[cfg(target_pointer_width = "64")]
		{
			assert_eq!(
				format!("{:?}", BitIdx::<u64>::MAX),
				"BitIdx<u64>(111111)",
			);
			assert_eq!(
				format!("{:?}", BitIdx::<u64>::new(64).unwrap_err()),
				"BitIdxError<u64>(64)",
			);
			assert_eq!(
				format!("{:?}", BitEnd::<u64>::MAX),
				"BitEnd<u64>(1000000)",
			);
			assert_eq!(
				format!("{:?}", BitPos::<u64>::MAX),
				"BitPos<u64>(111111)",
			);
			assert_eq!(
				format!("{:?}", BitSel::<u64>::new(1).unwrap()),
				"BitSel<u64>(0000000000000000000000000000000000000000000000000000000000000001)",
			);
			assert_eq!(
				format!("{:?}", BitMask::<u64>::new(1 | 4 | 32)),
				"BitMask<u64>(0000000000000000000000000000000000000000000000000000000000100101)",
			);
		}
	}
1349}