heed/reserved_space.rs

use std::mem::MaybeUninit;
use std::{fmt, io};

use crate::mdb::ffi;

/// A structure that is used to improve the write speed in LMDB.
///
/// You must write the exact number of bytes, no less, no more.
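///
/// # Example
///
/// A minimal usage sketch. The `db.put_reserved(..)` call and its exact signature are
/// assumptions made for illustration; the key point is that the closure receives a
/// [`ReservedSpace`] and must fill it completely through [`std::io::Write`].
///
/// ```ignore
/// use std::io::Write;
///
/// let payload = [0xAB_u8; 16];
/// // Reserve exactly `payload.len()` bytes, then write exactly that many of them.
/// db.put_reserved(&mut wtxn, "key", payload.len(), |space| {
///     space.write_all(&payload)?;
///     Ok(())
/// })?;
/// ```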
pub struct ReservedSpace<'a> {
    bytes: &'a mut [MaybeUninit<u8>],
    /// Number of bytes which have been written: all bytes in `0..written`.
    written: usize,
    /// Index of the byte which should be written next.
    ///
    /// # Safety
    ///
    /// To ensure there are no unwritten gaps in the buffer, this should be kept in the range
    /// `0..=written` at all times.
    write_head: usize,
}

impl ReservedSpace<'_> {
    /// Wraps the `MDB_val` handed back by LMDB when space is reserved for a value
    /// (i.e. a write performed with the `MDB_RESERVE` flag).
    ///
    /// # Safety
    ///
    /// `val` must describe a valid, writable region of `mv_size` bytes that remains alive
    /// and exclusively borrowed for the lifetime `'a`.
    pub(crate) unsafe fn from_val<'a>(val: ffi::MDB_val) -> ReservedSpace<'a> {
        let len = val.mv_size;
        let ptr = val.mv_data;

        ReservedSpace {
            bytes: std::slice::from_raw_parts_mut(ptr.cast(), len),
            written: 0,
            write_head: 0,
        }
    }

    /// The total number of bytes that this memory buffer has.
    #[inline]
    pub fn size(&self) -> usize {
        self.bytes.len()
    }

    /// The remaining number of bytes that this memory buffer has.
    #[inline]
    pub fn remaining(&self) -> usize {
        self.bytes.len() - self.write_head
    }

    /// Get a slice of all the bytes that have previously been written.
    ///
    /// This can be used to write information which cannot be known until the very end of
    /// serialization. For example, this method can be used to serialize a value, then compute a
    /// checksum over the bytes, and then write that checksum to a header at the start of the
    /// reserved space.
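    ///
    /// # Example
    ///
    /// A sketch of that checksum pattern, assuming four bytes were reserved for the header
    /// and a hypothetical `checksum` helper returning a `u32`:
    ///
    /// ```ignore
    /// use std::io::Write;
    ///
    /// // Write a placeholder header first, then the body.
    /// space.write_all(&[0u8; 4])?;
    /// space.write_all(body)?;
    ///
    /// // Compute the checksum over the body and patch it into the header.
    /// let written = space.written_mut();
    /// let sum: u32 = checksum(&written[4..]); // `checksum` is a placeholder
    /// written[..4].copy_from_slice(&sum.to_le_bytes());
    /// ```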
    #[inline]
    pub fn written_mut(&mut self) -> &mut [u8] {
        let ptr = self.bytes.as_mut_ptr();
        let len = self.written;
        // SAFETY: every byte in `0..written` has been initialized.
        unsafe { std::slice::from_raw_parts_mut(ptr.cast(), len) }
    }

    /// Fills the remaining reserved space with zeroes.
    ///
    /// This can be used together with [`written_mut`](Self::written_mut) to get a mutable view of
    /// the entire reserved space.
    ///
    /// ### Note
    ///
    /// After calling this function, the entire space is considered to be filled and any
    /// further attempt to [`write`](std::io::Write::write) anything else will fail.
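    ///
    /// # Example
    ///
    /// A sketch of combining this with [`written_mut`](Self::written_mut) to view the whole
    /// buffer, assuming `space` is a partially filled [`ReservedSpace`]:
    ///
    /// ```ignore
    /// let total = space.size();
    /// space.fill_zeroes();
    /// let whole: &mut [u8] = space.written_mut();
    /// assert_eq!(whole.len(), total);
    /// ```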
    #[inline]
    pub fn fill_zeroes(&mut self) {
        self.bytes[self.write_head..].fill(MaybeUninit::new(0));
        self.written = self.bytes.len();
        self.write_head = self.bytes.len();
    }

    /// Get a slice of bytes corresponding to the *entire* reserved space.
    ///
    /// It is safe to write to any byte within the slice. However, for a write past the end of the
    /// previously written bytes to take effect, [`assume_written`](Self::assume_written) has to be
    /// called to mark those bytes as initialized.
    ///
    /// # Safety
    ///
    /// As the memory comes from within the database itself, the bytes may not yet be
    /// initialized. Thus, it is up to the caller to ensure that only initialized memory is read
    /// (ensured by the [`MaybeUninit`] API).
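    ///
    /// # Example
    ///
    /// A sketch of pairing this with [`assume_written`](Self::assume_written), assuming the
    /// reserved space is at least `data.len()` bytes long:
    ///
    /// ```ignore
    /// let data = *b"hello";
    /// let buf = space.as_uninit_mut();
    /// for (dst, &src) in buf.iter_mut().zip(data.iter()) {
    ///     dst.write(src);
    /// }
    /// // SAFETY: the first `data.len()` bytes were initialized in the loop above.
    /// unsafe { space.assume_written(data.len()) };
    /// ```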
    #[inline]
    pub fn as_uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        self.bytes
    }

    /// Marks the bytes in the range `0..len` as being initialized by advancing the internal write
    /// pointer.
    ///
    /// # Safety
    ///
    /// The caller guarantees that all bytes in the range have been initialized.
    #[inline]
    pub unsafe fn assume_written(&mut self, len: usize) {
        debug_assert!(len <= self.bytes.len());
        self.written = len;
        self.write_head = len;
    }
}

impl io::Write for ReservedSpace<'_> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.write_all(buf)?;
        Ok(buf.len())
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        // SAFETY: `write_head` never exceeds `bytes.len()`, so the range is in bounds.
        let remaining = unsafe { self.bytes.get_unchecked_mut(self.write_head..) };

        if buf.len() > remaining.len() {
            return Err(io::Error::from(io::ErrorKind::WriteZero));
        }

        unsafe {
            // SAFETY: we can always cast `T` -> `MaybeUninit<T>` as it's a transparent wrapper
            let buf_uninit = std::slice::from_raw_parts(buf.as_ptr().cast(), buf.len());
            remaining.as_mut_ptr().copy_from_nonoverlapping(buf_uninit.as_ptr(), buf.len());
        }

        self.write_head += buf.len();
        self.written = usize::max(self.written, self.write_head);

        Ok(())
    }

    #[inline(always)]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

/// ## Note
///
/// May only seek within the previously written space.
/// Attempts to do otherwise will result in an error.
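///
/// # Example
///
/// A sketch of seeking back to patch a length prefix once the body size is known, assuming
/// `body` has already been serialized elsewhere:
///
/// ```ignore
/// use std::io::{Seek, SeekFrom, Write};
///
/// space.write_all(&[0u8; 4])?; // placeholder for the length prefix
/// space.write_all(body)?;
///
/// // Seek back into the written region and overwrite the placeholder.
/// space.seek(SeekFrom::Start(0))?;
/// space.write_all(&(body.len() as u32).to_le_bytes())?;
/// ```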
impl io::Seek for ReservedSpace<'_> {
    #[inline]
    fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
        let (base, offset) = match pos {
            io::SeekFrom::Start(start) => (start, 0),
            io::SeekFrom::End(offset) => (self.written as u64, offset),
            io::SeekFrom::Current(offset) => (self.write_head as u64, offset),
        };

        let Some(new_pos) = base.checked_add_signed(offset) else {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "cannot seek before start of reserved space",
            ));
        };

        if new_pos > self.written as u64 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "cannot seek past end of reserved space",
            ));
        }

        self.write_head = new_pos as usize;

        Ok(new_pos)
    }

    #[inline]
    fn rewind(&mut self) -> io::Result<()> {
        self.write_head = 0;
        Ok(())
    }

    #[inline]
    fn stream_position(&mut self) -> io::Result<u64> {
        Ok(self.write_head as u64)
    }
}

impl fmt::Debug for ReservedSpace<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ReservedSpace").finish()
    }
}