// redb/tree_store/page_store/in_memory_backend.rs
use crate::StorageBackend;
use std::io;
use std::sync::*;
4
/// Acts as a temporal in-memory database storage.
///
/// The entire "file" lives in a `Vec<u8>` guarded by an `RwLock`, so the
/// backend is cheap to create and safe to share across threads.
#[derive(Debug, Default)]
pub struct InMemoryBackend(RwLock<Vec<u8>>);

impl InMemoryBackend {
    /// Creates a new, empty in-memory backend.
    pub fn new() -> Self {
        Self::default()
    }

    /// Builds the error reported whenever an offset/length pair falls
    /// outside the current buffer.
    fn out_of_range() -> io::Error {
        io::Error::new(io::ErrorKind::InvalidInput, "Index out-of-range.")
    }

    /// Acquires the shared read guard over the buffer.
    ///
    /// Panics if the lock is poisoned (a writer panicked mid-operation).
    fn read(&self) -> RwLockReadGuard<'_, Vec<u8>> {
        self.0.read().expect("Could not acquire read lock.")
    }

    /// Acquires the exclusive write guard over the buffer.
    ///
    /// Panics if the lock is poisoned (a writer panicked mid-operation).
    fn write(&self) -> RwLockWriteGuard<'_, Vec<u8>> {
        self.0.write().expect("Could not acquire write lock.")
    }
}
31
32impl StorageBackend for InMemoryBackend {
33 fn len(&self) -> Result<u64, io::Error> {
34 Ok(self.read().len() as u64)
35 }
36
37 fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, io::Error> {
38 let guard = self.read();
39 let offset = usize::try_from(offset).map_err(|_| Self::out_of_range())?;
40 if offset + len <= guard.len() {
41 Ok(guard[offset..offset + len].to_owned())
42 } else {
43 Err(Self::out_of_range())
44 }
45 }
46
47 fn set_len(&self, len: u64) -> Result<(), io::Error> {
48 let mut guard = self.write();
49 let len = usize::try_from(len).map_err(|_| Self::out_of_range())?;
50 if guard.len() < len {
51 let additional = len - guard.len();
52 guard.reserve(additional);
53 for _ in 0..additional {
54 guard.push(0);
55 }
56 } else {
57 guard.truncate(len);
58 }
59
60 Ok(())
61 }
62
63 fn sync_data(&self, _: bool) -> Result<(), io::Error> {
64 Ok(())
65 }
66
67 fn write(&self, offset: u64, data: &[u8]) -> Result<(), io::Error> {
68 let mut guard = self.write();
69 let offset = usize::try_from(offset).map_err(|_| Self::out_of_range())?;
70 if offset + data.len() <= guard.len() {
71 guard[offset..offset + data.len()].copy_from_slice(data);
72 Ok(())
73 } else {
74 Err(Self::out_of_range())
75 }
76 }
77}