#![doc = include_str!("../doc/order.md")]

use crate::{
	index::{
		BitEnd,
		BitIdx,
		BitMask,
		BitPos,
		BitSel,
	},
	mem::{
		bits_of,
		BitRegister,
	},
};

#[doc = include_str!("../doc/order/BitOrder.md")]
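/// ## Implementation Sketch
///
/// A custom ordering only has to provide `at`. As a minimal, hypothetical
/// sketch (mirroring the `HiLo` test ordering at the bottom of this module;
/// the name `FlipFour` is illustrative, not part of the crate):
///
/// ```rust
/// use bitvec::{
/// 	index::{BitIdx, BitPos},
/// 	mem::BitRegister,
/// 	order::BitOrder,
/// };
///
/// pub struct FlipFour;
///
/// unsafe impl BitOrder for FlipFour {
/// 	fn at<R>(index: BitIdx<R>) -> BitPos<R>
/// 	where R: BitRegister {
/// 		//  XOR with a constant is self-inverse, so this is a bijection.
/// 		BitPos::new(index.into_inner() ^ 4).unwrap()
/// 	}
/// }
/// ```
///
/// Implementations should be checked with `verify` or `verify_for_type`
/// before use.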
pub unsafe trait BitOrder: 'static {
	/// Translates a semantic bit index into a real bit position.
	///
	/// This function is the basis of the trait, and must adhere to a number of
	/// requirements in order for an implementation to be correct.
	///
	/// ## Type Parameters
	///
	/// - `R`: The memory element type that the index and position govern.
	///
	/// ## Parameters
	///
	/// - `index`: A semantic bit-index within some `R` element.
	///
	/// ## Returns
	///
	/// The real position of the indexed bit within an `R` element. See the
	/// `BitPos` documentation for what these positions are considered to mean.
	///
	/// ## Requirements
	///
	/// This function must satisfy the following requirements for all possible
	/// input and output values, for all possible `R` type parameters:
	///
	/// - Totality: The implementation must be able to accept every input in
	///   [`BitIdx::<R>::range_all()`], and produce some `BitPos` value for
	///   each.
	/// - Bijection: There must be an exact one-to-one correspondence between
	///   input and output values. No input index may choose its output from a
	///   set of more than one position, and no output position may be produced
	///   by more than one input index.
	/// - Purity: The translation from index to position must be consistent for
	///   the lifetime of *at least* all data structures in the program. This
	///   function *may* refer to global state, but that state **must** be
	///   immutable while any `bitvec` data structures exist, and must not be
	///   used to violate the totality or bijection requirements.
	/// - Validity: The produced `BitPos` value must be within the valid range
	///   of its type. This is enforced by [`BitPos::new`], but not by the
	///   unsafe constructor [`BitPos::new_unchecked`].
	///
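	/// ## Examples
	///
	/// A minimal sketch of how the same index maps to different positions
	/// under the two orderings defined later in this module:
	///
	/// ```rust
	/// use bitvec::{
	/// 	index::BitIdx,
	/// 	order::{BitOrder, Lsb0, Msb0},
	/// };
	///
	/// let idx = BitIdx::<u8>::new(1).unwrap();
	/// assert_eq!(Lsb0::at::<u8>(idx).into_inner(), 1);
	/// assert_eq!(Msb0::at::<u8>(idx).into_inner(), 6);
	/// ```
	///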
	/// [`BitIdx::<R>::range_all()`]: crate::index::BitIdx::range_all
	/// [`BitPos::new`]: crate::index::BitPos::new
	/// [`BitPos::new_unchecked`]: crate::index::BitPos::new_unchecked
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister;

	/// Produces a single-bit selection mask from a bit-index.
	///
	/// This is an optional function: it is implemented as, and must always be
	/// exactly identical to, `BitOrder::at(index).select()`. If your ordering
	/// has a faster implementation, you may provide it, but it must be exactly
	/// numerically equivalent.
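	///
	/// For example (using the `Lsb0` ordering defined below):
	///
	/// ```rust
	/// use bitvec::{index::BitIdx, order::{BitOrder, Lsb0}};
	///
	/// let idx = BitIdx::<u8>::new(3).unwrap();
	/// assert_eq!(Lsb0::select::<u8>(idx).into_inner(), 0b0000_1000);
	/// ```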
	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		Self::at::<R>(index).select()
	}

	/// Produces a multi-bit selection mask from a range of bit-indices.
	///
	/// This is an optional function: it is implemented as, and must always be
	/// exactly identical to,
	/// `BitIdx::range(from, upto).map(BitOrder::select).sum()`. If your
	/// ordering has a faster implementation, you may provide it, but it must be
	/// exactly numerically equivalent.
	///
	/// ## Parameters
	///
	/// - `from`: The inclusive starting value of the indices being selected.
	///   Defaults to [`BitIdx::MIN`].
	/// - `upto`: The exclusive ending value of the indices being selected.
	///   Defaults to [`BitEnd::MAX`].
	///
	/// ## Returns
	///
	/// A selection mask with all bit-positions corresponding to `from .. upto`
	/// selected.
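	///
	/// ## Examples
	///
	/// An illustrative check (using the `Msb0` ordering defined below):
	///
	/// ```rust
	/// use bitvec::{
	/// 	index::{BitEnd, BitIdx, BitMask},
	/// 	order::{BitOrder, Msb0},
	/// };
	///
	/// let from = BitIdx::<u8>::new(2).unwrap();
	/// let upto = BitEnd::<u8>::new(5).unwrap();
	/// //  Indices 2, 3, and 4 occupy Msb0 positions 5, 4, and 3.
	/// assert_eq!(Msb0::mask(from, upto), BitMask::new(0b0011_1000));
	/// ```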
	///
	/// [`BitEnd::MAX`]: crate::index::BitEnd::MAX
	/// [`BitIdx::MIN`]: crate::index::BitIdx::MIN
	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where
		R: BitRegister,
	{
		let (from, upto) = match (from.into(), upto.into()) {
			(None, None) => return BitMask::ALL,
			(Some(from), None) => (from, BitEnd::MAX),
			(None, Some(upto)) => (BitIdx::MIN, upto),
			(Some(from), Some(upto)) => (from, upto),
		};
		from.range(upto).map(Self::select::<R>).sum()
	}
}

#[doc = include_str!("../doc/order/Lsb0.md")]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Lsb0;

#[doc = include_str!("../doc/order/Msb0.md")]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Msb0;

unsafe impl BitOrder for Lsb0 {
	#[inline]
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		unsafe { BitPos::new_unchecked(index.into_inner()) }
	}

	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		unsafe { BitSel::new_unchecked(R::ONE << index.into_inner()) }
	}

	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where
		R: BitRegister,
	{
		let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
		let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
		debug_assert!(
			from <= upto,
			"Ranges must run from low index ({}) to high ({})",
			from,
			upto,
		);
		let ct = upto - from;
		if ct == bits_of::<R>() as u8 {
			return BitMask::ALL;
		}
		/* This expression does the following work:
		 * 1. Set all bits in the mask to `1`.
		 * 2. Shift left by the number of bits in the mask. The mask bits are
		 *    now at LSedge and `0`.
		 * 3. Invert the mask. The mask bits are now at LSedge and `1`; all
		 *    else are `0`.
		 * 4. Shift left by the `from` distance from LSedge. The mask bits now
		 *    begin at `from` left of LSedge and extend to `upto` left of
		 *    LSedge.
		 */
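		/* A worked example, assuming `R = u8`, `from = 2`, and `upto = 5`
		 * (so `ct = 3`):
		 *   !(0xFF << 3) == 0b0000_0111
		 *   0b0000_0111 << 2 == 0b0001_1100
		 * selecting exactly the Lsb0 positions of indices 2, 3, and 4.
		 */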
		BitMask::new(!(R::ALL << ct) << from)
	}
}

unsafe impl BitOrder for Msb0 {
	#[inline]
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		unsafe { BitPos::new_unchecked(R::MASK - index.into_inner()) }
	}

	#[inline]
	fn select<R>(index: BitIdx<R>) -> BitSel<R>
	where R: BitRegister {
		/* Shift the MSbit down by the index count. This is not written as
		 * `R::ONE << (R::MASK - index)`, because that expression must perform
		 * a runtime subtraction before the shift, while this form shifts a
		 * precomputed constant.
		 */
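		//  For `R = u8`, `msbit` is `0b1000_0000`, so index 2 selects
		//  `0b0010_0000` (position 5).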
		let msbit: R = R::ONE << R::MASK;
		unsafe { BitSel::new_unchecked(msbit >> index.into_inner()) }
	}

	#[inline]
	fn mask<R>(
		from: impl Into<Option<BitIdx<R>>>,
		upto: impl Into<Option<BitEnd<R>>>,
	) -> BitMask<R>
	where
		R: BitRegister,
	{
		let from = from.into().unwrap_or(BitIdx::MIN).into_inner();
		let upto = upto.into().unwrap_or(BitEnd::MAX).into_inner();
		debug_assert!(
			from <= upto,
			"Ranges must run from low index ({}) to high ({})",
			from,
			upto,
		);
		let ct = upto - from;
		if ct == bits_of::<R>() as u8 {
			return BitMask::ALL;
		}
		/* This expression does the following work:
		 * 1. Set all bits in the mask to `1`.
		 * 2. Shift right by the number of bits in the mask. The mask bits are
		 *    now at MSedge and `0`.
		 * 3. Invert the mask. The mask bits are now at MSedge and `1`; all
		 *    else are `0`.
		 * 4. Shift right by the `from` distance from MSedge. The mask bits
		 *    now begin at `from` right of MSedge and extend to `upto` right
		 *    of MSedge.
		 */
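		/* A worked example, assuming `R = u8`, `from = 2`, and `upto = 5`
		 * (so `ct = 3`):
		 *   !(0xFF >> 3) == 0b1110_0000
		 *   0b1110_0000 >> 2 == 0b0011_1000
		 * selecting exactly the Msb0 positions of indices 2, 3, and 4.
		 */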
		BitMask::new(!(R::ALL >> ct) >> from)
	}
}

#[cfg(target_endian = "little")]
#[doc = include_str!("../doc/order/LocalBits.md")]
pub use self::Lsb0 as LocalBits;
#[cfg(target_endian = "big")]
#[doc = include_str!("../doc/order/LocalBits.md")]
pub use self::Msb0 as LocalBits;

#[cfg(not(any(target_endian = "big", target_endian = "little")))]
compile_error!(
	"This architecture is not supported! Please consider filing an issue"
);

#[inline]
#[cfg(not(tarpaulin_include))]
#[doc = include_str!("../doc/order/verify.md")]
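///
/// For example, a custom ordering can be checked in a test (shown here with
/// the built-in `Lsb0`, which always passes):
///
/// ```rust
/// use bitvec::order::{verify, Lsb0};
///
/// verify::<Lsb0>(false);
/// ```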
pub fn verify<O>(verbose: bool)
where O: BitOrder {
	verify_for_type::<u8, O>(verbose);
	verify_for_type::<u16, O>(verbose);
	verify_for_type::<u32, O>(verbose);
	verify_for_type::<usize, O>(verbose);

	#[cfg(target_pointer_width = "64")]
	verify_for_type::<u64, O>(verbose);
}

/// Verification does not access memory, and is both useless and slow in Miri.
#[cfg(miri)]
pub fn verify_for_type<R, O>(_: bool)
where
	R: BitRegister,
	O: BitOrder,
{
}

#[cfg(not(miri))]
#[doc = include_str!("../doc/order/verify_for_type.md")]
pub fn verify_for_type<R, O>(verbose: bool)
where
	R: BitRegister,
	O: BitOrder,
{
	use core::any::type_name;
	let mut accum = BitMask::<R>::ZERO;

	let ord_name = type_name::<O>();
	let reg_name = type_name::<R>();

	for n in 0 .. bits_of::<R>() as u8 {
		//  Wrap the counter as an index.
		let idx = unsafe { BitIdx::<R>::new_unchecked(n) };

		//  Compute the bit position for the index.
		let pos = O::at::<R>(idx);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::at::<{}>({})` produces {}",
				ord_name,
				reg_name,
				n,
				pos.into_inner(),
			);
		}

		//  If the computed position exceeds the valid range, fail.
		assert!(
			pos.into_inner() < bits_of::<R>() as u8,
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit position ({}) that exceeds the type width \
			 {}",
			ord_name,
			n,
			pos.into_inner(),
			bits_of::<R>(),
		);

		//  Check `O`’s implementation of `select`
		let sel = O::select::<R>(idx);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::select::<{}>({})` produces {:b}",
				ord_name, reg_name, n, sel,
			);
		}

		//  If the selector bit is not one-hot, fail.
		assert_eq!(
			sel.into_inner().count_ones(),
			1,
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit selector ({:b}) that is not a one-hot mask",
			ord_name,
			n,
			sel,
		);

		//  Check that the selection computed from the index matches the
		//  selection computed from the position.
		let shl = pos.select();
		//  If `O::select(idx)` does not produce `1 << pos`, fail.
		assert_eq!(
			sel,
			shl,
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit selector ({:b}) that is not equal to `1 \
			 << {}` ({:b})",
			ord_name,
			n,
			sel,
			pos.into_inner(),
			shl,
		);

		//  Check that the produced selector bit has not already been added to
		//  the accumulator.
		assert!(
			!accum.test(sel),
			"Error when verifying the implementation of `BitOrder` for `{}`: \
			 Index {} produces a bit position ({}) that has already been \
			 produced by a prior index",
			ord_name,
			n,
			pos.into_inner(),
		);
		accum.insert(sel);
		if verbose {
			#[cfg(feature = "std")]
			println!(
				"`<{} as BitOrder>::at::<{}>({})` accumulates {:b}",
				ord_name, reg_name, n, accum,
			);
		}
	}

	//  Check that all indices produced all positions.
	assert_eq!(
		accum,
		BitMask::ALL,
		"Error when verifying the implementation of `BitOrder` for `{}`: The \
		 bit positions marked with a `0` here were never produced from an \
		 index, despite all possible indices being passed in for translation: \
		 {:b}",
		ord_name,
		accum,
	);

	//  Check that `O::mask` is correct for all range combinations.
	for from in BitIdx::<R>::range_all() {
		for upto in BitEnd::<R>::range_from(from) {
			let mask = O::mask(from, upto);
			let check = from
				.range(upto)
				.map(O::at)
				.map(BitPos::select)
				.sum::<BitMask<R>>();
			assert_eq!(
				mask,
				check,
				"Error when verifying the implementation of `BitOrder` for \
				 `{o}`: `{o}::mask::<{m}>({f}, {u})` produced {bad:b}, but \
				 expected {good:b}",
				o = ord_name,
				m = reg_name,
				f = from,
				u = upto,
				bad = mask,
				good = check,
			);
		}
	}
}

/// An ordering that does not provide a contiguous index map or `BitField`
/// acceleration.
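///
/// It transforms indices by flipping their third bit (`xor` with `4`), so
/// within each byte, index blocks `0 .. 4` and `4 .. 8` trade places. Because
/// `xor` with a constant is self-inverse, the mapping remains a bijection and
/// passes `verify_for_type`.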
#[cfg(test)]
pub struct HiLo;

#[cfg(test)]
unsafe impl BitOrder for HiLo {
	fn at<R>(index: BitIdx<R>) -> BitPos<R>
	where R: BitRegister {
		BitPos::new(index.into_inner() ^ 4).unwrap()
	}
}

#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn default_impl() {
		assert_eq!(Lsb0::mask(None, None), BitMask::<u8>::ALL);
		assert_eq!(Msb0::mask(None, None), BitMask::<u8>::ALL);
		assert_eq!(HiLo::mask(None, None), BitMask::<u8>::ALL);

		assert_eq!(
			HiLo::mask(None, BitEnd::<u8>::new(3).unwrap()),
			BitMask::new(0b0111_0000),
		);
		assert_eq!(
			HiLo::mask(BitIdx::<u8>::new(3).unwrap(), None),
			BitMask::new(0b1000_1111),
		);
	}

	//  Split these out into individual test functions so they can parallelize.

	mod lsb0 {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, Lsb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, Lsb0>(cfg!(feature = "verbose"));
		}
	}

	mod msb0 {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, Msb0>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, Msb0>(cfg!(feature = "verbose"));
		}
	}

	mod hilo {
		use super::*;

		#[test]
		fn verify_u8() {
			verify_for_type::<u8, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u16() {
			verify_for_type::<u16, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_u32() {
			verify_for_type::<u32, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(all(target_pointer_width = "64", not(tarpaulin)))]
		fn verify_u64() {
			verify_for_type::<u64, HiLo>(cfg!(feature = "verbose"));
		}

		#[test]
		#[cfg(not(tarpaulin))]
		fn verify_usize() {
			verify_for_type::<usize, HiLo>(cfg!(feature = "verbose"));
		}
	}
}