#![doc = include_str!("../doc/store.md")]

use core::{
	cell::Cell,
	fmt::Debug,
};

use funty::Integral;

use crate::{
	access::*,
	index::BitIdx,
	mem::{
		self,
		BitRegister,
	},
	order::BitOrder,
};

#[doc = include_str!("../doc/store/BitStore.md")]
pub trait BitStore: 'static + Debug {
	/// The register type that the implementor describes.
	type Mem: BitRegister + BitStore<Mem = Self::Mem>;
	/// The type used for all memory access to, and mutation of, the register.
	type Access: BitAccess<Item = Self::Mem> + BitStore<Mem = Self::Mem>;
	/// A sibling implementor that is known to be alias-safe.
	type Alias: BitStore<Mem = Self::Mem>;
	/// The inverse of `Alias`, undoing the `T -> T::Alias` transition.
	type Unalias: BitStore<Mem = Self::Mem>;
	/// The zero constant.
	const ZERO: Self;

	/// Wraps a bare register value in the implementing type.
	fn new(value: Self::Mem) -> Self;

	/// Loads a value out of memory, using whatever access instructions
	/// `Self::Access` requires, since other handles may alias the location.
	fn load_value(&self) -> Self::Mem;

	/// Stores a value into memory. The `&mut` receiver proves that no other
	/// handle can observe the location, so a plain write suffices.
	fn store_value(&mut self, value: Self::Mem);

	/// Reads one bit out of the register, selected by `index` under the
	/// bit-ordering `O`.
	#[inline]
	fn get_bit<O>(&self, index: BitIdx<Self::Mem>) -> bool
	where O: BitOrder {
		self.load_value() & index.select::<O>().into_inner()
			!= <Self::Mem as Integral>::ZERO
	}

	/// Compile-time proof that the implementor's alignment equals its size.
	const ALIGNED_TO_SIZE: [(); 1];
	/// Compile-time proof that `Self` and `Self::Alias` share a layout.
	const ALIAS_WIDTH: [(); 1];
}
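
// A minimal sketch, not part of the original source: because every
// implementor routes reads and writes through `load_value`/`store_value`,
// downstream code can stay generic over bare integers, `Cell`s, and atomics
// alike. The function name `invert_in_place` is illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn invert_in_place<S: BitStore>(elem: &mut S) {
	// `S::Mem: BitRegister` implies `Integral`, so `!` is available.
	let current = elem.load_value();
	elem.store_value(!current);
}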

/// Implements `BitStore` for an unsigned integer and its `BitSafe` wrapper.
macro_rules! store {
	($($base:ty => $safe:ty);+ $(;)?) => { $(
		impl BitStore for $base {
			type Mem = Self;
			/// Unsynchronized mutation is only permitted through `Cell`.
			type Access = Cell<Self>;
			/// Aliased bare integers must become their `BitSafe` wrapper.
			type Alias = $safe;
			type Unalias = Self;
			const ZERO: Self = 0;

			#[inline]
			fn new(value: Self::Mem) -> Self {
				value
			}

			#[inline]
			fn load_value(&self) -> Self::Mem {
				*self
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = value;
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];
			const ALIAS_WIDTH: [(); 1]
				= [(); mem::layout_eq::<Self, Self::Alias>() as usize];
		}

		impl BitStore for $safe {
			type Mem = $base;
			/// The wrapper's interior `Radium` type performs all access.
			type Access = <Self as BitSafe>::Rad;
			/// The wrapper is already alias-safe.
			type Alias = Self;
			type Unalias = $base;
			const ZERO: Self = <Self as BitSafe>::ZERO;

			#[inline]
			fn new(value: Self::Mem) -> Self {
				<Self>::new(value)
			}

			#[inline]
			fn load_value(&self) -> Self::Mem {
				self.load()
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = Self::new(value);
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];
			/// `Self` and `Self::Alias` are identical by definition.
			const ALIAS_WIDTH: [(); 1] = [()];
		}

		impl BitStore for Cell<$base> {
			type Mem = $base;
			/// `Cell` always permits shared mutation, so it is its own
			/// accessor and alias.
			type Access = Self;
			type Alias = Self;
			type Unalias = Self;
			const ZERO: Self = Self::new(0);

			#[inline]
			fn new(value: Self::Mem) -> Self {
				<Self>::new(value)
			}

			#[inline]
			fn load_value(&self) -> Self::Mem {
				self.get()
			}

			#[inline]
			fn store_value(&mut self, value: Self::Mem) {
				*self = Self::new(value);
			}

			const ALIGNED_TO_SIZE: [(); 1]
				= [(); mem::aligned_to_size::<Self>() as usize];
			const ALIAS_WIDTH: [(); 1] = [()];
		}
	)+ };
}

store! {
	u8 => BitSafeU8;
	u16 => BitSafeU16;
	u32 => BitSafeU32;
}

#[cfg(target_pointer_width = "64")]
store!(u64 => BitSafeU64);

store!(usize => BitSafeUsize);
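
// A hedged compile-and-run check, not present in the original source: the
// table wired up by `store!` should be round-trip consistent, with `Alias`
// mapping a bare integer to its `BitSafe` wrapper and `Unalias` mapping the
// wrapper back again. `Cell` types are their own alias.
#[cfg(test)]
#[test]
fn alias_table_roundtrip() {
	use core::any::TypeId;

	fn id<T: 'static>() -> TypeId {
		TypeId::of::<T>()
	}

	assert_eq!(id::<<u8 as BitStore>::Alias>(), id::<BitSafeU8>());
	assert_eq!(id::<<BitSafeU8 as BitStore>::Unalias>(), id::<u8>());
	assert_eq!(id::<<Cell<u32> as BitStore>::Alias>(), id::<Cell<u32>>());
}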

/// Implements `BitStore` for the atomic types, when the target has them.
macro_rules! atomic {
	($($size:tt, $base:ty => $atom:ident);+ $(;)?) => { $(
		radium::if_atomic!(if atomic($size) {
			use core::sync::atomic::$atom;

			impl BitStore for $atom {
				type Mem = $base;
				/// Atomics always access memory through themselves.
				type Access = Self;
				type Alias = Self;
				type Unalias = Self;
				const ZERO: Self = <Self>::new(0);

				#[inline]
				fn new(value: Self::Mem) -> Self {
					<Self>::new(value)
				}

				#[inline]
				fn load_value(&self) -> Self::Mem {
					self.load(core::sync::atomic::Ordering::Relaxed)
				}

				#[inline]
				fn store_value(&mut self, value: Self::Mem) {
					// `&mut` proves unique access; no RMW cycle is needed.
					*self = Self::new(value);
				}

				const ALIGNED_TO_SIZE: [(); 1]
					= [(); mem::aligned_to_size::<Self>() as usize];
				const ALIAS_WIDTH: [(); 1] = [()];
			}
		});
	)+ };
}

atomic! {
	8, u8 => AtomicU8;
	16, u16 => AtomicU16;
	32, u32 => AtomicU32;
}

#[cfg(target_pointer_width = "64")]
atomic!(64, u64 => AtomicU64);

atomic!(size, usize => AtomicUsize);
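
// A minimal sketch, not in the original source: because the atomics use
// `Access = Self`, a *shared* reference can still write, which is what lets
// aliased atomic `BitSlice`s remain `Send + Sync`. The `Relaxed` ordering
// mirrors the one used by `load_value` above; the `atomic` feature gate
// follows the convention of the tests below.
#[cfg(all(test, feature = "atomic"))]
#[test]
fn atomic_shared_write() {
	use core::sync::atomic::{
		AtomicU8,
		Ordering,
	};

	let atom = AtomicU8::new(0);
	let shared: &AtomicU8 = &atom;
	shared.store(0b1010, Ordering::Relaxed);
	assert_eq!(atom.load_value(), 0b1010);
}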

#[cfg(test)]
mod tests {
	use static_assertions::*;

	use super::*;
	use crate::prelude::*;

	#[test]
	fn load_store() {
		let mut word = 0usize;
		word.store_value(39);
		assert_eq!(word.load_value(), 39);

		let mut safe = BitSafeUsize::new(word);
		safe.store_value(57);
		assert_eq!(safe.load_value(), 57);

		let mut cell = Cell::new(0usize);
		cell.store_value(39);
		assert_eq!(cell.load_value(), 39);

		radium::if_atomic!(if atomic(size) {
			let mut atom = AtomicUsize::new(0);
			atom.store_value(57);
			assert_eq!(atom.load_value(), 57);
		});
	}
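
	// A hedged demonstration, not in the original tests: the provided
	// `get_bit` method reads single bits under a chosen ordering. With
	// `Lsb0`, index 0 selects the least significant bit of the register.
	#[test]
	fn get_bit_lsb0() {
		let word = 0b0101u8;
		assert!(word.get_bit::<Lsb0>(BitIdx::new(0).unwrap()));
		assert!(!word.get_bit::<Lsb0>(BitIdx::new(1).unwrap()));
		assert!(word.get_bit::<Lsb0>(BitIdx::new(2).unwrap()));
	}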

	#[test]
	fn unaliased_send_sync() {
		assert_impl_all!(BitSlice<u8, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<u16, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<u32, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<usize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_impl_all!(BitSlice<u64, LocalBits>: Send, Sync);
	}

	#[test]
	fn cell_unsend_unsync() {
		assert_not_impl_any!(BitSlice<Cell<u8>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<u16>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<u32>, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<Cell<usize>, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_not_impl_any!(BitSlice<Cell<u64>, LocalBits>: Send, Sync);
	}

	#[test]
	#[cfg(not(feature = "atomic"))]
	fn aliased_non_atomic_unsend_unsync() {
		assert_not_impl_any!(BitSlice<BitSafeU8, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeU16, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeU32, LocalBits>: Send, Sync);
		assert_not_impl_any!(BitSlice<BitSafeUsize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_not_impl_any!(BitSlice<BitSafeU64, LocalBits>: Send, Sync);
	}

	#[test]
	#[cfg(feature = "atomic")]
	fn aliased_atomic_send_sync() {
		assert_impl_all!(BitSlice<AtomicU8, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicU16, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicU32, LocalBits>: Send, Sync);
		assert_impl_all!(BitSlice<AtomicUsize, LocalBits>: Send, Sync);

		#[cfg(target_pointer_width = "64")]
		assert_impl_all!(BitSlice<AtomicU64, LocalBits>: Send, Sync);
	}
}