rustls/msgs/deframer/buffers.rs

use alloc::vec::Vec;
use core::mem;
use core::ops::Range;
#[cfg(feature = "std")]
use std::io;

#[cfg(feature = "std")]
use crate::msgs::message::MAX_WIRE_SIZE;

/// Conversion from a slice within a larger buffer into
/// a `Range` offset within.
#[derive(Debug)]
pub(crate) struct Locator {
    bounds: Range<*const u8>,
}

impl Locator {
    #[inline]
    pub(crate) fn new(slice: &[u8]) -> Self {
        Self {
            bounds: slice.as_ptr_range(),
        }
    }

    #[inline]
    pub(crate) fn locate(&self, slice: &[u8]) -> Range<usize> {
        let bounds = slice.as_ptr_range();
        debug_assert!(self.fully_contains(slice));
        let start = bounds.start as usize - self.bounds.start as usize;
        let len = bounds.end as usize - bounds.start as usize;
        Range {
            start,
            end: start + len,
        }
    }

    #[inline]
    pub(crate) fn fully_contains(&self, slice: &[u8]) -> bool {
        let bounds = slice.as_ptr_range();
        bounds.start >= self.bounds.start && bounds.end <= self.bounds.end
    }
}
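
// A minimal sketch of the intended round-trip, assuming an in-crate test
// context: `locate()` maps a subslice back to its offsets within the buffer
// it was carved from.
#[test]
fn locator_maps_subslice_to_offsets() {
    let buf = [0u8; 8];
    let locator = Locator::new(&buf);
    let sub = &buf[2..5];
    assert!(locator.fully_contains(sub));
    assert_eq!(locator.locate(sub), Range { start: 2, end: 5 });
}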

/// Conversion from a `Range` offset to the original slice.
pub(crate) struct Delocator<'b> {
    slice: &'b [u8],
}

impl<'b> Delocator<'b> {
    #[inline]
    pub(crate) fn new(slice: &'b [u8]) -> Self {
        Self { slice }
    }

    #[inline]
    pub(crate) fn slice_from_range(&'_ self, range: &Range<usize>) -> &'b [u8] {
        // safety: this unwrap is safe so long as `range` came from `locate()`
        // for the same buffer
        self.slice.get(range.clone()).unwrap()
    }

    #[inline]
    pub(crate) fn locator(self) -> Locator {
        Locator::new(self.slice)
    }
}
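
// A minimal sketch of the inverse direction, assuming an in-crate test
// context: a `Range` produced by `locate()` resolves back to the equivalent
// subslice via `slice_from_range()`.
#[test]
fn delocator_resolves_range_back_to_slice() {
    let buf = [1u8, 2, 3, 4, 5];
    let range = Locator::new(&buf).locate(&buf[1..4]);
    assert_eq!(Delocator::new(&buf).slice_from_range(&range), &[2, 3, 4][..]);
}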

/// Reordering the underlying buffer based on ranges.
pub(crate) struct Coalescer<'b> {
    slice: &'b mut [u8],
}

impl<'b> Coalescer<'b> {
    #[inline]
    pub(crate) fn new(slice: &'b mut [u8]) -> Self {
        Self { slice }
    }

    #[inline]
    pub(crate) fn copy_within(&mut self, from: Range<usize>, to: Range<usize>) {
        debug_assert!(from.len() == to.len());
        debug_assert!(self.slice.get(from.clone()).is_some());
        debug_assert!(self.slice.get(to.clone()).is_some());
        self.slice.copy_within(from, to.start);
    }

    #[inline]
    pub(crate) fn delocator(self) -> Delocator<'b> {
        Delocator::new(self.slice)
    }
}
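
// A minimal sketch of coalescing, assuming an in-crate test context: move a
// later range leftwards over a gap so two regions become contiguous, then read
// the result back through the `Delocator`.
#[test]
fn coalescer_moves_range_leftwards() {
    let mut buf = [1u8, 2, 0, 0, 3, 4];
    let mut coalescer = Coalescer::new(&mut buf);
    coalescer.copy_within(Range { start: 4, end: 6 }, Range { start: 2, end: 4 });
    let delocator = coalescer.delocator();
    assert_eq!(delocator.slice_from_range(&(0..4)), &[1, 2, 3, 4][..]);
}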

/// Accounting structure tracking progress in parsing a single buffer.
#[derive(Clone, Debug)]
pub(crate) struct BufferProgress {
    /// Prefix of the buffer that has been processed so far.
    ///
    /// `processed` may exceed `discard`; that means we have parsed
    /// some buffer, but are still using it.  This happens due to
    /// in-place decryption of incoming records, and in-place
    /// reassembly of handshake messages.
    ///
    /// 0 <= processed <= len
    processed: usize,

    /// Prefix of the buffer that can be removed.
    ///
    /// If `discard` exceeds `processed`, that means we are ignoring
    /// data without processing it.
    ///
    /// 0 <= discard <= len
    discard: usize,
}

impl BufferProgress {
    pub(super) fn new(processed: usize) -> Self {
        Self {
            processed,
            discard: 0,
        }
    }

    #[inline]
    pub(crate) fn add_discard(&mut self, discard: usize) {
        self.discard += discard;
    }

    #[inline]
    pub(crate) fn add_processed(&mut self, processed: usize) {
        self.processed += processed;
    }

    #[inline]
    pub(crate) fn take_discard(&mut self) -> usize {
        // the caller is about to discard `discard` bytes
        // from the front of the buffer.  adjust `processed`
        // down by the same amount.
        self.processed = self
            .processed
            .saturating_sub(self.discard);
        mem::take(&mut self.discard)
    }

    #[inline]
    pub(crate) fn processed(&self) -> usize {
        self.processed
    }
}
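
// A minimal sketch of the accounting rules, assuming an in-crate test context:
// `processed` may run ahead of `discard`, and `take_discard()` rebases
// `processed` when the prefix is dropped.
#[test]
fn buffer_progress_rebases_processed_on_discard() {
    let mut progress = BufferProgress::new(0);
    progress.add_processed(10);
    progress.add_discard(4);
    assert_eq!(progress.take_discard(), 4);
    assert_eq!(progress.processed(), 6);
    assert_eq!(progress.take_discard(), 0);
}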

#[derive(Default, Debug)]
pub(crate) struct DeframerVecBuffer {
    /// Buffer of data read from the socket, in the process of being parsed into messages.
    ///
    /// For buffer size management, check out the [`DeframerVecBuffer::prepare_read()`] method.
    buf: Vec<u8>,

    /// What size prefix of `buf` is used.
    used: usize,
}

impl DeframerVecBuffer {
    /// Discard `taken` bytes from the start of our buffer.
    pub(crate) fn discard(&mut self, taken: usize) {
        if taken < self.used {
            /* Before:
             * +----------+----------+----------+
             * | taken    | pending  |xxxxxxxxxx|
             * +----------+----------+----------+
             * 0          ^ taken    ^ self.used
             *
             * After:
             * +----------+----------+----------+
             * | pending  |xxxxxxxxxxxxxxxxxxxxx|
             * +----------+----------+----------+
             * 0          ^ self.used
             */

            self.buf
                .copy_within(taken..self.used, 0);
            self.used -= taken;
        } else {
            // `taken >= self.used`: everything pending is consumed.
            self.used = 0;
        }
    }

    pub(crate) fn filled_mut(&mut self) -> &mut [u8] {
        &mut self.buf[..self.used]
    }

    pub(crate) fn filled(&self) -> &[u8] {
        &self.buf[..self.used]
    }
}
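
// A minimal sketch of discarding, assuming an in-crate test context (which can
// poke the private fields directly): discarding a prefix shifts the remaining
// pending bytes to the front, and over-discarding just empties the buffer.
#[test]
fn vec_buffer_discard_shifts_pending_bytes() {
    let mut buffer = DeframerVecBuffer::default();
    buffer.buf = [1u8, 2, 3, 4, 5].to_vec();
    buffer.used = 5;
    buffer.discard(2);
    assert_eq!(buffer.filled(), &[3, 4, 5][..]);
    buffer.discard(10);
    assert!(buffer.filled().is_empty());
}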

#[cfg(feature = "std")]
impl DeframerVecBuffer {
    /// Read some bytes from `rd`, and add them to the buffer.
    pub(crate) fn read(&mut self, rd: &mut dyn io::Read, in_handshake: bool) -> io::Result<usize> {
        if let Err(err) = self.prepare_read(in_handshake) {
            return Err(io::Error::new(io::ErrorKind::InvalidData, err));
        }

        // Try to do the largest reads possible. Note that if
        // we get a message with a length field out of range here,
        // we do a zero length read.  That looks like an EOF to
        // the next layer up, which is fine.
        let new_bytes = rd.read(&mut self.buf[self.used..])?;
        self.used += new_bytes;
        Ok(new_bytes)
    }

    /// Resize the internal `buf` if necessary for reading more bytes.
    fn prepare_read(&mut self, is_joining_hs: bool) -> Result<(), &'static str> {
        /// TLS allows for handshake messages of up to 16MB.  We
        /// restrict that to 64KB to limit potential for denial-of-
        /// service.
        const MAX_HANDSHAKE_SIZE: u32 = 0xffff;

        const READ_SIZE: usize = 4096;

        // We allow a maximum of 64k of buffered data for handshake messages only. Enforce this
        // by varying the maximum allowed buffer size here based on whether a prefix of a
        // handshake payload is currently being buffered. Given that the first read of such a
        // payload will only ever be 4k bytes, the next time we come around here we allow a
        // larger buffer size. Once the large message and any following handshake messages in
        // the same flight have been consumed, `pop()` will call `discard()` to reset `used`.
        // At this point, the buffer resizing logic below should reduce the buffer size.
        let allow_max = match is_joining_hs {
            true => MAX_HANDSHAKE_SIZE as usize,
            false => MAX_WIRE_SIZE,
        };

        if self.used >= allow_max {
            return Err("message buffer full");
        }

        // If we can and need to increase the buffer size to allow a 4k read, do so. After
        // dealing with a large handshake message (exceeding `OutboundOpaqueMessage::MAX_WIRE_SIZE`),
        // make sure to reduce the buffer size again (large messages should be rare).
        // Also, reduce the buffer size if there are neither full nor partial messages in it,
        // which usually means that the other side suspended sending data.
        let need_capacity = Ord::min(allow_max, self.used + READ_SIZE);
        if need_capacity > self.buf.len() {
            self.buf.resize(need_capacity, 0);
        } else if self.used == 0 || self.buf.len() > allow_max {
            self.buf.resize(need_capacity, 0);
            self.buf.shrink_to(need_capacity);
        }

        Ok(())
    }

    /// Append `bytes` to the end of this buffer.
    ///
    /// Return a `Range` saying where it went.
    pub(crate) fn extend(&mut self, bytes: &[u8]) -> Range<usize> {
        let len = bytes.len();
        let start = self.used;
        let end = start + len;
        if self.buf.len() < end {
            self.buf.resize(end, 0);
        }
        self.buf[start..end].copy_from_slice(bytes);
        self.used += len;
        Range { start, end }
    }
}
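
// A minimal sketch of feeding the buffer (std-only), assuming an in-crate test
// context: `&[u8]` implements `io::Read`, so it stands in for a socket here,
// and `extend()` reports the range where the appended bytes landed, which
// pairs naturally with `Delocator::slice_from_range()`.
#[cfg(feature = "std")]
#[test]
fn vec_buffer_read_and_extend_sketch() {
    let mut buffer = DeframerVecBuffer::default();

    // `read()` grows the buffer (in `READ_SIZE` steps) and appends from the reader.
    let mut input: &[u8] = b"\x16\x03\x03";
    let n = buffer.read(&mut input, false).unwrap();
    assert_eq!(n, 3);
    assert_eq!(buffer.filled(), b"\x16\x03\x03");

    // `extend()` appends directly and says where the bytes went.
    let range = buffer.extend(b"hello");
    assert_eq!(range, Range { start: 3, end: 8 });
    assert_eq!(Delocator::new(buffer.filled()).slice_from_range(&range), b"hello");
}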

/// A borrowed version of [`DeframerVecBuffer`] that tracks discard operations
#[derive(Debug)]
pub(crate) struct DeframerSliceBuffer<'a> {
    // a fully initialized buffer that will be deframed
    buf: &'a mut [u8],
    // number of bytes to discard from the front of `buf` at a later time
    discard: usize,
}

impl<'a> DeframerSliceBuffer<'a> {
    pub(crate) fn new(buf: &'a mut [u8]) -> Self {
        Self { buf, discard: 0 }
    }

    /// Tracks a pending discard operation of `num_bytes`
    pub(crate) fn queue_discard(&mut self, num_bytes: usize) {
        self.discard += num_bytes;
    }

    pub(crate) fn pending_discard(&self) -> usize {
        self.discard
    }

    pub(crate) fn filled_mut(&mut self) -> &mut [u8] {
        &mut self.buf[self.discard..]
    }
}
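
// A minimal sketch of queued discards, assuming an in-crate test context:
// discards accumulate rather than apply immediately, and `filled_mut()` simply
// skips the queued prefix.
#[test]
fn slice_buffer_queues_discards() {
    let mut data = [1u8, 2, 3, 4];
    let mut buffer = DeframerSliceBuffer::new(&mut data);
    buffer.queue_discard(1);
    buffer.queue_discard(1);
    assert_eq!(buffer.pending_discard(), 2);
    assert_eq!(buffer.filled_mut(), &[3, 4][..]);
}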