1use std::mem::MaybeUninit;
2use std::{fmt, io};
34use crate::mdb::ffi;
/// A structure that is used to improve the write speed in LMDB.
///
/// You must write the exact amount of bytes, no less, no more.
pub struct ReservedSpace<'a> {
    // The entire reserved region handed out by LMDB; bytes past `written`
    // may be uninitialized, hence `MaybeUninit`.
    bytes: &'a mut [MaybeUninit<u8>],
    /// Number of bytes which have been written: all bytes in `0..written`.
    written: usize,
    /// Index of the byte which should be written next.
    ///
    /// # Safety
    ///
    /// To ensure there are no unwritten gaps in the buffer this should be kept in the range
    /// `0..=written` at all times.
    write_head: usize,
}
2122impl ReservedSpace<'_> {
23pub(crate) unsafe fn from_val<'a>(val: ffi::MDB_val) -> ReservedSpace<'a> {
24let len = val.mv_size;
25let ptr = val.mv_data;
2627 ReservedSpace {
28 bytes: std::slice::from_raw_parts_mut(ptr.cast(), len),
29 written: 0,
30 write_head: 0,
31 }
32 }
3334/// The total number of bytes that this memory buffer has.
35#[inline]
36pub fn size(&self) -> usize {
37self.bytes.len()
38 }
3940/// The remaining number of bytes that this memory buffer has.
41#[inline]
42pub fn remaining(&self) -> usize {
43self.bytes.len() - self.write_head
44 }
4546/// Get a slice of all the bytes that have previously been written.
47 ///
48 /// This can be used to write information which cannot be known until the very end of
49 /// serialization. For example, this method can be used to serialize a value, then compute a
50 /// checksum over the bytes, and then write that checksum to a header at the start of the
51 /// reserved space.
52#[inline]
53pub fn written_mut(&mut self) -> &mut [u8] {
54let ptr = self.bytes.as_mut_ptr();
55let len = self.written;
56unsafe { std::slice::from_raw_parts_mut(ptr.cast(), len) }
57 }
5859/// Fills the remaining reserved space with zeroes.
60 ///
61 /// This can be used together with [`written_mut`](Self::written_mut) to get a mutable view of
62 /// the entire reserved space.
63 ///
64 /// ### Note
65 ///
66 /// After calling this function, the entire space is considered to be filled and any
67 /// further attempt to [`write`](std::io::Write::write) anything else will fail.
68#[inline]
69pub fn fill_zeroes(&mut self) {
70self.bytes[self.write_head..].fill(MaybeUninit::new(0));
71self.written = self.bytes.len();
72self.write_head = self.bytes.len();
73 }
7475/// Get a slice of bytes corresponding to the *entire* reserved space.
76 ///
77 /// It is safe to write to any byte within the slice. However, for a write past the end of the
78 /// prevously written bytes to take effect, [`assume_written`](Self::assume_written) has to be
79 /// called to mark those bytes as initialized.
80 ///
81 /// # Safety
82 ///
83 /// As the memory comes from within the database itself, the bytes may not yet be
84 /// initialized. Thus, it is up to the caller to ensure that only initialized memory is read
85 /// (ensured by the [`MaybeUninit`] API).
86#[inline]
87pub fn as_uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] {
88self.bytes
89 }
9091/// Marks the bytes in the range `0..len` as being initialized by advancing the internal write
92 /// pointer.
93 ///
94 /// # Safety
95 ///
96 /// The caller guarantees that all bytes in the range have been initialized.
97#[inline]
98pub unsafe fn assume_written(&mut self, len: usize) {
99debug_assert!(len <= self.bytes.len());
100self.written = len;
101self.write_head = len;
102 }
103}
104105impl io::Write for ReservedSpace<'_> {
106#[inline]
107fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
108self.write_all(buf)?;
109Ok(buf.len())
110 }
111112#[inline]
113fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
114let remaining = unsafe { self.bytes.get_unchecked_mut(self.write_head..) };
115116if buf.len() > remaining.len() {
117return Err(io::Error::from(io::ErrorKind::WriteZero));
118 }
119120unsafe {
121// SAFETY: we can always cast `T` -> `MaybeUninit<T>` as it's a transparent wrapper
122let buf_uninit = std::slice::from_raw_parts(buf.as_ptr().cast(), buf.len());
123 remaining.as_mut_ptr().copy_from_nonoverlapping(buf_uninit.as_ptr(), buf.len());
124 }
125126self.write_head += buf.len();
127self.written = usize::max(self.written, self.write_head);
128129Ok(())
130 }
131132#[inline(always)]
133fn flush(&mut self) -> io::Result<()> {
134Ok(())
135 }
136}
137138/// ## Note
139///
140/// May only seek within the previously written space.
141/// Attempts to do otherwise will result in an error.
142impl io::Seek for ReservedSpace<'_> {
143#[inline]
144fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
145let (base, offset) = match pos {
146 io::SeekFrom::Start(start) => (start, 0),
147 io::SeekFrom::End(offset) => (self.written as u64, offset),
148 io::SeekFrom::Current(offset) => (self.write_head as u64, offset),
149 };
150151let Some(new_pos) = base.checked_add_signed(offset) else {
152return Err(std::io::Error::new(
153 std::io::ErrorKind::InvalidInput,
154"cannot seek before start of reserved space",
155 ));
156 };
157158if new_pos > self.written as u64 {
159return Err(std::io::Error::new(
160 std::io::ErrorKind::InvalidInput,
161"cannot seek past end of reserved space",
162 ));
163 }
164165self.write_head = new_pos as usize;
166167Ok(new_pos)
168 }
169170#[inline]
171fn rewind(&mut self) -> io::Result<()> {
172self.write_head = 0;
173Ok(())
174 }
175176#[inline]
177fn stream_position(&mut self) -> io::Result<u64> {
178Ok(self.write_head as u64)
179 }
180}
181182impl fmt::Debug for ReservedSpace<'_> {
183fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
184 f.debug_struct("ReservedSpace").finish()
185 }
186}