h2/proto/streams/
streams.rs

use super::recv::RecvHeaderBlockError;
use super::store::{self, Entry, Resolve, Store};
use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId};
use crate::codec::{Codec, SendError, UserError};
use crate::ext::Protocol;
use crate::frame::{self, Frame, Reason};
use crate::proto::{peer, Error, Initiator, Open, Peer, WindowSize};
use crate::{client, proto, server};

use bytes::{Buf, Bytes};
use http::{HeaderMap, Request, Response};
use std::task::{Context, Poll, Waker};
use tokio::io::AsyncWrite;

use std::sync::{Arc, Mutex};
use std::{fmt, io};

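// Illustrative sketch (hypothetical caller; not part of this module): the
// connection layer owns a `Streams` handle and drives it roughly like
//
//     let mut streams = Streams::<bytes::Bytes, client::Peer>::new(config);
//     // on each poll of the connection:
//     //     streams.poll_complete(cx, &mut codec)?;
//     // and, on the server side, accept new streams via:
//     //     while let Some(stream_ref) = streams.next_incoming() { /* ... */ }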
#[derive(Debug)]
pub(crate) struct Streams<B, P>
where
    P: Peer,
{
    /// Holds most of the connection and stream related state for processing
    /// HTTP/2 frames associated with streams.
    inner: Arc<Mutex<Inner>>,

    /// This is the queue of frames to be written to the wire. This is split out
    /// to avoid requiring a `B` generic on all public API types even if `B` is
    /// not technically required.
    ///
    /// Currently, splitting this out requires a second `Arc` + `Mutex`.
    /// However, it should be possible to avoid this duplication with a little
    /// bit of unsafe code. This optimization has been postponed until it has
    /// been shown to be necessary.
    send_buffer: Arc<SendBuffer<B>>,

    _p: ::std::marker::PhantomData<P>,
}

// Like `Streams` but with a `peer::Dyn` field instead of a static `P: Peer` type parameter.
// Ensures that the methods only get one instantiation, instead of two (client and server).
#[derive(Debug)]
pub(crate) struct DynStreams<'a, B> {
    inner: &'a Mutex<Inner>,

    send_buffer: &'a SendBuffer<B>,

    peer: peer::Dyn,
}

/// Reference to the stream state
#[derive(Debug)]
pub(crate) struct StreamRef<B> {
    opaque: OpaqueStreamRef,
    send_buffer: Arc<SendBuffer<B>>,
}

/// Reference to the stream state that hides the send data chunk generic
pub(crate) struct OpaqueStreamRef {
    inner: Arc<Mutex<Inner>>,
    key: store::Key,
}

/// Fields needed to manage the state of the set of streams. This is mostly
/// split out to make ownership happy.
///
/// TODO: better name
#[derive(Debug)]
struct Inner {
    /// Tracks send & recv stream concurrency.
    counts: Counts,

    /// Connection-level state that performs actions on streams
    actions: Actions,

    /// Stores stream state
    store: Store,

    /// The number of stream refs to this shared state.
    refs: usize,
}

#[derive(Debug)]
struct Actions {
    /// Manages state transitions initiated by receiving frames
    recv: Recv,

    /// Manages state transitions initiated by sending frames
    send: Send,

    /// Task that calls `poll_complete`.
    task: Option<Waker>,

    /// If the connection errors, a copy is kept for any StreamRefs.
    conn_error: Option<proto::Error>,
}

/// Contains the buffer of frames to be written to the wire.
#[derive(Debug)]
struct SendBuffer<B> {
    inner: Mutex<Buffer<Frame<B>>>,
}

// ===== impl Streams =====

impl<B, P> Streams<B, P>
where
    B: Buf,
    P: Peer,
{
    pub fn new(config: Config) -> Self {
        let peer = P::r#dyn();

        Streams {
            inner: Inner::new(peer, config),
            send_buffer: Arc::new(SendBuffer::new()),
            _p: ::std::marker::PhantomData,
        }
    }

    pub fn set_target_connection_window_size(&mut self, size: WindowSize) -> Result<(), Reason> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        me.actions
            .recv
            .set_target_connection_window(size, &mut me.actions.task)
    }

    pub fn next_incoming(&mut self) -> Option<StreamRef<B>> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;
        me.actions.recv.next_incoming(&mut me.store).map(|key| {
            let stream = &mut me.store.resolve(key);
            tracing::trace!(
                "next_incoming; id={:?}, state={:?}",
                stream.id,
                stream.state
            );
            // TODO: ideally, OpaqueStreamRef::new would do this, but we're
            // holding the lock, so it can't.
            me.refs += 1;

            // Pending-accepted remotely-reset streams are counted.
            if stream.state.is_remote_reset() {
                me.counts.dec_num_remote_reset_streams();
            }

            StreamRef {
                opaque: OpaqueStreamRef::new(self.inner.clone(), stream),
                send_buffer: self.send_buffer.clone(),
            }
        })
    }

    pub fn send_pending_refusal<T>(
        &mut self,
        cx: &mut Context,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
    {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;
        me.actions.recv.send_pending_refusal(cx, dst)
    }

    pub fn clear_expired_reset_streams(&mut self) {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;
        me.actions
            .recv
            .clear_expired_reset_streams(&mut me.store, &mut me.counts);
    }

    pub fn poll_complete<T>(
        &mut self,
        cx: &mut Context,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
    {
        let mut me = self.inner.lock().unwrap();
        me.poll_complete(&self.send_buffer, cx, dst)
    }

    pub fn apply_remote_settings(
        &mut self,
        frame: &frame::Settings,
        is_initial: bool,
    ) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        me.counts.apply_remote_settings(frame, is_initial);

        me.actions.send.apply_remote_settings(
            frame,
            send_buffer,
            &mut me.store,
            &mut me.counts,
            &mut me.actions.task,
        )
    }

    pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        me.actions.recv.apply_local_settings(frame, &mut me.store)
    }

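    // Hypothetical usage sketch (the caller side is assumed, not defined
    // here): the client layer drives this roughly as
    //
    //     let (stream_ref, is_full) = streams.send_request(req, end_of_stream, pending)?;
    //
    // where `is_full` reports that the next locally-initiated stream would
    // reach the concurrency limit.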
    pub fn send_request(
        &mut self,
        mut request: Request<()>,
        end_of_stream: bool,
        pending: Option<&OpaqueStreamRef>,
    ) -> Result<(StreamRef<B>, bool), SendError> {
        use super::stream::ContentLength;
        use http::Method;

        let protocol = request.extensions_mut().remove::<Protocol>();

        // Clear before taking the lock, in case extensions contain a StreamRef.
        request.extensions_mut().clear();

        // TODO: There is a hazard with assigning a stream ID before the
        // prioritize layer. If prioritization reorders new streams, this
        // implicitly closes the earlier stream IDs.
        //
        // See: hyperium/h2#11
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        me.actions.ensure_no_conn_error()?;
        me.actions.send.ensure_next_stream_id()?;

        // The `pending` argument is provided by the `Client`, and holds
        // a store `Key` of a `Stream` that may not have been opened yet.
        //
        // If that stream is still pending, the Client isn't allowed to
        // queue up another pending stream. They should use `poll_ready`.
        if let Some(stream) = pending {
            if me.store.resolve(stream.key).is_pending_open {
                return Err(UserError::Rejected.into());
            }
        }

        if me.counts.peer().is_server() {
            // Servers cannot open streams. PushPromise must first be reserved.
            return Err(UserError::UnexpectedFrameType.into());
        }

        let stream_id = me.actions.send.open()?;

        let mut stream = Stream::new(
            stream_id,
            me.actions.send.init_window_sz(),
            me.actions.recv.init_window_sz(),
        );

        if *request.method() == Method::HEAD {
            stream.content_length = ContentLength::Head;
        }

        // Convert the message
        let headers =
            client::Peer::convert_send_message(stream_id, request, protocol, end_of_stream)?;

        let mut stream = me.store.insert(stream.id, stream);

        let sent = me.actions.send.send_headers(
            headers,
            send_buffer,
            &mut stream,
            &mut me.counts,
            &mut me.actions.task,
        );

        // If `send_headers` returns a `UserError`, we should forget
        // about this stream.
        if let Err(err) = sent {
            stream.unlink();
            stream.remove();
            return Err(err.into());
        }

        // Given that the stream has been initialized, it should not be in the
        // closed state.
        debug_assert!(!stream.state.is_closed());

        // TODO: ideally, OpaqueStreamRef::new would do this, but we're
        // holding the lock, so it can't.
        me.refs += 1;

        let is_full = me.counts.next_send_stream_will_reach_capacity();
        Ok((
            StreamRef {
                opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream),
                send_buffer: self.send_buffer.clone(),
            },
            is_full,
        ))
    }

    pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
        self.inner
            .lock()
            .unwrap()
            .actions
            .send
            .is_extended_connect_protocol_enabled()
    }

    pub fn current_max_send_streams(&self) -> usize {
        let me = self.inner.lock().unwrap();
        me.counts.max_send_streams()
    }

    pub fn current_max_recv_streams(&self) -> usize {
        let me = self.inner.lock().unwrap();
        me.counts.max_recv_streams()
    }
}

impl<B> DynStreams<'_, B> {
    pub fn is_buffer_empty(&self) -> bool {
        self.send_buffer.is_empty()
    }

    pub fn is_server(&self) -> bool {
        self.peer.is_server()
    }

    pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();

        me.recv_headers(self.peer, self.send_buffer, frame)
    }

    pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();
        me.recv_data(self.peer, self.send_buffer, frame)
    }

    pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();

        me.recv_reset(self.send_buffer, frame)
    }

    /// Notify all streams that a connection-level error happened.
    pub fn handle_error(&mut self, err: proto::Error) -> StreamId {
        let mut me = self.inner.lock().unwrap();
        me.handle_error(self.send_buffer, err)
    }

    pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();
        me.recv_go_away(self.send_buffer, frame)
    }

    pub fn last_processed_id(&self) -> StreamId {
        self.inner.lock().unwrap().actions.recv.last_processed_id()
    }

    pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();
        me.recv_window_update(self.send_buffer, frame)
    }

    pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), Error> {
        let mut me = self.inner.lock().unwrap();
        me.recv_push_promise(self.send_buffer, frame)
    }

    pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> {
        let mut me = self.inner.lock().map_err(|_| ())?;
        me.recv_eof(self.send_buffer, clear_pending_accept)
    }

    pub fn send_reset(&mut self, id: StreamId, reason: Reason) {
        let mut me = self.inner.lock().unwrap();
        me.send_reset(self.send_buffer, id, reason)
    }

    pub fn send_go_away(&mut self, last_processed_id: StreamId) {
        let mut me = self.inner.lock().unwrap();
        me.actions.recv.go_away(last_processed_id);
    }
}
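
// Illustrative sketch (assumed call site; not part of this module): the
// connection driver obtains the type-erased view for frame dispatch via
// `Streams::as_dyn`, e.g.
//
//     let mut streams = streams.as_dyn();
//     streams.recv_headers(headers_frame)?;
//     streams.recv_data(data_frame)?;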

impl Inner {
    fn new(peer: peer::Dyn, config: Config) -> Arc<Mutex<Self>> {
        Arc::new(Mutex::new(Inner {
            counts: Counts::new(peer, &config),
            actions: Actions {
                recv: Recv::new(peer, &config),
                send: Send::new(&config),
                task: None,
                conn_error: None,
            },
            store: Store::new(),
            refs: 1,
        }))
    }

    fn recv_headers<B>(
        &mut self,
        peer: peer::Dyn,
        send_buffer: &SendBuffer<B>,
        frame: frame::Headers,
    ) -> Result<(), Error> {
        let id = frame.stream_id();

        // The GOAWAY process has begun. All streams with a greater ID than
        // specified as part of GOAWAY should be ignored.
        if id > self.actions.recv.max_stream_id() {
            tracing::trace!(
                "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS",
                id,
                self.actions.recv.max_stream_id()
            );
            return Ok(());
        }

        let key = match self.store.find_entry(id) {
            Entry::Occupied(e) => e.key(),
            Entry::Vacant(e) => {
                // Client: it's possible to send a request, and then send
                // a RST_STREAM while the response HEADERS were in transit.
                //
                // Server: we can't reset a stream before having received
                // the request headers, so don't allow.
                if !peer.is_server() {
                    // This may be response headers for a stream we've already
                    // forgotten about...
                    if self.actions.may_have_forgotten_stream(peer, id) {
                        tracing::debug!(
                            "recv_headers for old stream={:?}, sending STREAM_CLOSED",
                            id,
                        );
                        return Err(Error::library_reset(id, Reason::STREAM_CLOSED));
                    }
                }

                match self
                    .actions
                    .recv
                    .open(id, Open::Headers, &mut self.counts)?
                {
                    Some(stream_id) => {
                        let stream = Stream::new(
                            stream_id,
                            self.actions.send.init_window_sz(),
                            self.actions.recv.init_window_sz(),
                        );

                        e.insert(stream)
                    }
                    None => return Ok(()),
                }
            }
        };

        let stream = self.store.resolve(key);

        if stream.state.is_local_error() {
            // Locally reset streams must ignore frames "for some time".
            // This is because the remote may have sent trailers before
            // receiving the RST_STREAM frame.
            tracing::trace!("recv_headers; ignoring trailers on {:?}", stream.id);
            return Ok(());
        }

        let actions = &mut self.actions;
        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        self.counts.transition(stream, |counts, stream| {
            tracing::trace!(
                "recv_headers; stream={:?}; state={:?}",
                stream.id,
                stream.state
            );

            let res = if stream.state.is_recv_headers() {
                match actions.recv.recv_headers(frame, stream, counts) {
                    Ok(()) => Ok(()),
                    Err(RecvHeaderBlockError::Oversize(resp)) => {
                        if let Some(resp) = resp {
                            let sent = actions.send.send_headers(
                                resp, send_buffer, stream, counts, &mut actions.task);
                            debug_assert!(sent.is_ok(), "oversize response should not fail");

                            actions.send.schedule_implicit_reset(
                                stream,
                                Reason::PROTOCOL_ERROR,
                                counts,
                                &mut actions.task);

                            actions.recv.enqueue_reset_expiration(stream, counts);

                            Ok(())
                        } else {
                            Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR))
                        }
                    },
                    Err(RecvHeaderBlockError::State(err)) => Err(err),
                }
            } else {
                if !frame.is_end_stream() {
                    // Receiving trailers that don't set EOS is a "malformed"
                    // message. Malformed messages are a stream error.
                    proto_err!(stream: "recv_headers: trailers frame was not EOS; stream={:?}", stream.id);
                    return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR));
                }

                actions.recv.recv_trailers(frame, stream)
            };

            actions.reset_on_recv_stream_err(send_buffer, stream, counts, res)
        })
    }

    fn recv_data<B>(
        &mut self,
        peer: peer::Dyn,
        send_buffer: &SendBuffer<B>,
        frame: frame::Data,
    ) -> Result<(), Error> {
        let id = frame.stream_id();

        let stream = match self.store.find_mut(&id) {
            Some(stream) => stream,
            None => {
                // The GOAWAY process has begun. All streams with a greater ID
                // than specified as part of GOAWAY should be ignored.
                if id > self.actions.recv.max_stream_id() {
                    tracing::trace!(
                        "id ({:?}) > max_stream_id ({:?}), ignoring DATA",
                        id,
                        self.actions.recv.max_stream_id()
                    );
                    return Ok(());
                }

                if self.actions.may_have_forgotten_stream(peer, id) {
                    tracing::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,);

                    let sz = frame.payload().len();
                    // This should have been enforced at the codec::FramedRead layer, so
                    // this is just a sanity check.
                    assert!(sz <= super::MAX_WINDOW_SIZE as usize);
                    let sz = sz as WindowSize;

                    self.actions.recv.ignore_data(sz)?;
                    return Err(Error::library_reset(id, Reason::STREAM_CLOSED));
                }

                proto_err!(conn: "recv_data: stream not found; id={:?}", id);
                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
            }
        };

        let actions = &mut self.actions;
        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        self.counts.transition(stream, |counts, stream| {
            let sz = frame.payload().len();
            let res = actions.recv.recv_data(frame, stream);

            // Any stream error after receiving a DATA frame means
            // we won't give the data to the user, and so they can't
            // release the capacity. We do it automatically.
            if let Err(Error::Reset(..)) = res {
                actions
                    .recv
                    .release_connection_capacity(sz as WindowSize, &mut None);
            }
            actions.reset_on_recv_stream_err(send_buffer, stream, counts, res)
        })
    }

    fn recv_reset<B>(
        &mut self,
        send_buffer: &SendBuffer<B>,
        frame: frame::Reset,
    ) -> Result<(), Error> {
        let id = frame.stream_id();

        if id.is_zero() {
            proto_err!(conn: "recv_reset: invalid stream ID 0");
            return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
        }

        // The GOAWAY process has begun. All streams with a greater ID than
        // specified as part of GOAWAY should be ignored.
        if id > self.actions.recv.max_stream_id() {
            tracing::trace!(
                "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM",
                id,
                self.actions.recv.max_stream_id()
            );
            return Ok(());
        }

        let stream = match self.store.find_mut(&id) {
            Some(stream) => stream,
            None => {
                // TODO: Are there other error cases?
                self.actions
                    .ensure_not_idle(self.counts.peer(), id)
                    .map_err(Error::library_go_away)?;

                return Ok(());
            }
        };

        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        let actions = &mut self.actions;

        self.counts.transition(stream, |counts, stream| {
            actions.recv.recv_reset(frame, stream, counts)?;
            actions.send.handle_error(send_buffer, stream, counts);
            assert!(stream.state.is_closed());
            Ok(())
        })
    }

    fn recv_window_update<B>(
        &mut self,
        send_buffer: &SendBuffer<B>,
        frame: frame::WindowUpdate,
    ) -> Result<(), Error> {
        let id = frame.stream_id();

        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        if id.is_zero() {
            self.actions
                .send
                .recv_connection_window_update(frame, &mut self.store, &mut self.counts)
                .map_err(Error::library_go_away)?;
        } else {
            // The remote may send window updates for streams that the local now
            // considers closed. It's ok...
            if let Some(mut stream) = self.store.find_mut(&id) {
                // This result is ignored as there is nothing to do when there
                // is an error. The stream is reset by the function on error and
                // the error is informational.
                let _ = self.actions.send.recv_stream_window_update(
                    frame.size_increment(),
                    send_buffer,
                    &mut stream,
                    &mut self.counts,
                    &mut self.actions.task,
                );
            } else {
                self.actions
                    .ensure_not_idle(self.counts.peer(), id)
                    .map_err(Error::library_go_away)?;
            }
        }

        Ok(())
    }

    fn handle_error<B>(&mut self, send_buffer: &SendBuffer<B>, err: proto::Error) -> StreamId {
        let actions = &mut self.actions;
        let counts = &mut self.counts;
        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        let last_processed_id = actions.recv.last_processed_id();

        self.store.for_each(|stream| {
            counts.transition(stream, |counts, stream| {
                actions.recv.handle_error(&err, &mut *stream);
                actions.send.handle_error(send_buffer, stream, counts);
            })
        });

        actions.conn_error = Some(err);

        last_processed_id
    }

    fn recv_go_away<B>(
        &mut self,
        send_buffer: &SendBuffer<B>,
        frame: &frame::GoAway,
    ) -> Result<(), Error> {
        let actions = &mut self.actions;
        let counts = &mut self.counts;
        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        let last_stream_id = frame.last_stream_id();

        actions.send.recv_go_away(last_stream_id)?;

        let err = Error::remote_go_away(frame.debug_data().clone(), frame.reason());

        self.store.for_each(|stream| {
            if stream.id > last_stream_id {
                counts.transition(stream, |counts, stream| {
                    actions.recv.handle_error(&err, &mut *stream);
                    actions.send.handle_error(send_buffer, stream, counts);
                })
            }
        });

        actions.conn_error = Some(err);

        Ok(())
    }

    fn recv_push_promise<B>(
        &mut self,
        send_buffer: &SendBuffer<B>,
        frame: frame::PushPromise,
    ) -> Result<(), Error> {
        let id = frame.stream_id();
        let promised_id = frame.promised_id();

        // First, ensure that the initiating stream is still in a valid state.
        let parent_key = match self.store.find_mut(&id) {
            Some(stream) => {
                // The GOAWAY process has begun. All streams with a greater ID
                // than specified as part of GOAWAY should be ignored.
                if id > self.actions.recv.max_stream_id() {
                    tracing::trace!(
                        "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE",
                        id,
                        self.actions.recv.max_stream_id()
                    );
                    return Ok(());
                }

                // The stream must be receive open
                if !stream.state.ensure_recv_open()? {
                    proto_err!(conn: "recv_push_promise: initiating stream is not opened");
                    return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
                }

                stream.key()
            }
            None => {
                proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state");
                return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
            }
        };

        // TODO: Streams in the reserved states do not count towards the concurrency
        // limit. However, it seems like there should be a cap otherwise this
        // could grow in memory indefinitely.

        // Ensure that we can reserve streams
        self.actions.recv.ensure_can_reserve()?;

        // Next, open the stream.
        //
        // If `None` is returned, then the stream is being refused. There is no
        // further work to be done.
        if self
            .actions
            .recv
            .open(promised_id, Open::PushPromise, &mut self.counts)?
            .is_none()
        {
            return Ok(());
        }

        // Try to handle the frame and create a corresponding key for the pushed stream
        // this requires a bit of indirection to make the borrow checker happy.
        let child_key: Option<store::Key> = {
            // Create state for the stream
            let stream = self.store.insert(promised_id, {
                Stream::new(
                    promised_id,
                    self.actions.send.init_window_sz(),
                    self.actions.recv.init_window_sz(),
                )
            });

            let actions = &mut self.actions;

            self.counts.transition(stream, |counts, stream| {
                let stream_valid = actions.recv.recv_push_promise(frame, stream);

                match stream_valid {
                    Ok(()) => Ok(Some(stream.key())),
                    _ => {
                        let mut send_buffer = send_buffer.inner.lock().unwrap();
                        actions
                            .reset_on_recv_stream_err(
                                &mut *send_buffer,
                                stream,
                                counts,
                                stream_valid,
                            )
                            .map(|()| None)
                    }
                }
            })?
        };
        // If we're successful, push the headers and stream...
        if let Some(child) = child_key {
            let mut ppp = self.store[parent_key].pending_push_promises.take();
            ppp.push(&mut self.store.resolve(child));

            let parent = &mut self.store.resolve(parent_key);
            parent.pending_push_promises = ppp;
            parent.notify_push();
        };

        Ok(())
    }

    fn recv_eof<B>(
        &mut self,
        send_buffer: &SendBuffer<B>,
        clear_pending_accept: bool,
    ) -> Result<(), ()> {
        let actions = &mut self.actions;
        let counts = &mut self.counts;
        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        if actions.conn_error.is_none() {
            actions.conn_error = Some(
                io::Error::new(
                    io::ErrorKind::BrokenPipe,
                    "connection closed because of a broken pipe",
                )
                .into(),
            );
        }

        tracing::trace!("Streams::recv_eof");

        self.store.for_each(|stream| {
            counts.transition(stream, |counts, stream| {
                actions.recv.recv_eof(stream);

                // This handles resetting send state associated with the
                // stream
                actions.send.handle_error(send_buffer, stream, counts);
            })
        });

        actions.clear_queues(clear_pending_accept, &mut self.store, counts);
        Ok(())
    }

    fn poll_complete<T, B>(
        &mut self,
        send_buffer: &SendBuffer<B>,
        cx: &mut Context,
        dst: &mut Codec<T, Prioritized<B>>,
    ) -> Poll<io::Result<()>>
    where
        T: AsyncWrite + Unpin,
        B: Buf,
    {
        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        // Send WINDOW_UPDATE frames first
        //
        // TODO: It would probably be better to interleave updates w/ data
        // frames.
        ready!(self
            .actions
            .recv
            .poll_complete(cx, &mut self.store, &mut self.counts, dst))?;

        // Send any other pending frames
        ready!(self.actions.send.poll_complete(
            cx,
            send_buffer,
            &mut self.store,
            &mut self.counts,
            dst
        ))?;

        // Nothing else to do, track the task
        self.actions.task = Some(cx.waker().clone());

        Poll::Ready(Ok(()))
    }

    fn send_reset<B>(&mut self, send_buffer: &SendBuffer<B>, id: StreamId, reason: Reason) {
        let key = match self.store.find_entry(id) {
            Entry::Occupied(e) => e.key(),
            Entry::Vacant(e) => {
                // Resetting a stream we don't know about? That could be OK...
                //
                // 1. As a server, we just received a request, but that request
                //    was bad, so we're resetting before even accepting it.
                //    This is totally fine.
                //
                // 2. The remote may have sent us a frame on a new stream that
                //    it's *not* supposed to have done, and thus, we don't know
                //    the stream. In that case, sending a reset will "open" the
                //    stream in our store. Maybe that should be a connection
                //    error instead? At least for now, we need to update what
                //    our vision of the next stream is.
                if self.counts.peer().is_local_init(id) {
                    // We normally would open this stream, so update our
                    // next-send-id record.
                    self.actions.send.maybe_reset_next_stream_id(id);
                } else {
                    // We normally would recv this stream, so update our
                    // next-recv-id record.
                    self.actions.recv.maybe_reset_next_stream_id(id);
                }

                let stream = Stream::new(id, 0, 0);

                e.insert(stream)
            }
        };

        let stream = self.store.resolve(key);
        let mut send_buffer = send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;
        self.actions.send_reset(
            stream,
            reason,
            Initiator::Library,
            &mut self.counts,
            send_buffer,
        );
    }
}

impl<B> Streams<B, client::Peer>
where
    B: Buf,
{
    pub fn poll_pending_open(
        &mut self,
        cx: &Context,
        pending: Option<&OpaqueStreamRef>,
    ) -> Poll<Result<(), crate::Error>> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        me.actions.ensure_no_conn_error()?;
        me.actions.send.ensure_next_stream_id()?;

        if let Some(pending) = pending {
            let mut stream = me.store.resolve(pending.key);
            tracing::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open);
            if stream.is_pending_open {
                stream.wait_send(cx);
                return Poll::Pending;
            }
        }
        Poll::Ready(Ok(()))
    }
}

impl<B, P> Streams<B, P>
where
    P: Peer,
{
    pub fn as_dyn(&self) -> DynStreams<B> {
        let Self {
            inner,
            send_buffer,
            _p,
        } = self;
        DynStreams {
            inner,
            send_buffer,
            peer: P::r#dyn(),
        }
    }

    /// This function is safe to call multiple times.
    ///
    /// A `Result` is returned to avoid panicking if the mutex is poisoned.
    pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> {
        self.as_dyn().recv_eof(clear_pending_accept)
    }

    pub(crate) fn max_send_streams(&self) -> usize {
        self.inner.lock().unwrap().counts.max_send_streams()
    }

    pub(crate) fn max_recv_streams(&self) -> usize {
        self.inner.lock().unwrap().counts.max_recv_streams()
    }

    #[cfg(feature = "unstable")]
    pub fn num_active_streams(&self) -> usize {
        let me = self.inner.lock().unwrap();
        me.store.num_active_streams()
    }

    pub fn has_streams(&self) -> bool {
        let me = self.inner.lock().unwrap();
        me.counts.has_streams()
    }

    pub fn has_streams_or_other_references(&self) -> bool {
        let me = self.inner.lock().unwrap();
        me.counts.has_streams() || me.refs > 1
    }

    #[cfg(feature = "unstable")]
    pub fn num_wired_streams(&self) -> usize {
        let me = self.inner.lock().unwrap();
        me.store.num_wired_streams()
    }
}

// no derive because we don't need B and P to be Clone.
impl<B, P> Clone for Streams<B, P>
where
    P: Peer,
{
    fn clone(&self) -> Self {
        self.inner.lock().unwrap().refs += 1;
        Streams {
            inner: self.inner.clone(),
            send_buffer: self.send_buffer.clone(),
            _p: ::std::marker::PhantomData,
        }
    }
}

impl<B, P> Drop for Streams<B, P>
where
    P: Peer,
{
    fn drop(&mut self) {
        if let Ok(mut inner) = self.inner.lock() {
            inner.refs -= 1;
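            // `refs` counts handles to this shared state and starts at 1 in
            // `Inner::new`; if only one reference remains after this drop
            // (presumably the connection itself), wake the connection task so
            // it can observe the change and shut down if appropriate.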
            if inner.refs == 1 {
                if let Some(task) = inner.actions.task.take() {
                    task.wake();
                }
            }
        }
    }
}

// ===== impl StreamRef =====

impl<B> StreamRef<B> {
    pub fn send_data(&mut self, data: B, end_stream: bool) -> Result<(), UserError>
    where
        B: Buf,
    {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let stream = me.store.resolve(self.opaque.key);
        let actions = &mut me.actions;
        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        me.counts.transition(stream, |counts, stream| {
            // Create the data frame
            let mut frame = frame::Data::new(stream.id, data);
            frame.set_end_stream(end_stream);

            // Send the data frame
            actions
                .send
                .send_data(frame, send_buffer, stream, counts, &mut actions.task)
        })
    }

    pub fn send_trailers(&mut self, trailers: HeaderMap) -> Result<(), UserError> {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let stream = me.store.resolve(self.opaque.key);
        let actions = &mut me.actions;
        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        me.counts.transition(stream, |counts, stream| {
            // Create the trailers frame
            let frame = frame::Headers::trailers(stream.id, trailers);

            // Send the trailers frame
            actions
                .send
                .send_trailers(frame, send_buffer, stream, counts, &mut actions.task)
        })
    }

    pub fn send_reset(&mut self, reason: Reason) {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let stream = me.store.resolve(self.opaque.key);
        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        me.actions
            .send_reset(stream, reason, Initiator::User, &mut me.counts, send_buffer);
    }

    pub fn send_response(
        &mut self,
        mut response: Response<()>,
        end_of_stream: bool,
    ) -> Result<(), UserError> {
        // Clear before taking the lock, in case extensions contain a StreamRef.
        response.extensions_mut().clear();
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let stream = me.store.resolve(self.opaque.key);
        let actions = &mut me.actions;
        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        me.counts.transition(stream, |counts, stream| {
            let frame = server::Peer::convert_send_message(stream.id, response, end_of_stream);

            actions
                .send
                .send_headers(frame, send_buffer, stream, counts, &mut actions.task)
        })
    }

    pub fn send_push_promise(
        &mut self,
        mut request: Request<()>,
    ) -> Result<StreamRef<B>, UserError> {
        // Clear before taking the lock, in case extensions contain a StreamRef.
        request.extensions_mut().clear();
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let mut send_buffer = self.send_buffer.inner.lock().unwrap();
        let send_buffer = &mut *send_buffer;

        let actions = &mut me.actions;
        let promised_id = actions.send.reserve_local()?;

        let child_key = {
            let mut child_stream = me.store.insert(
                promised_id,
                Stream::new(
                    promised_id,
                    actions.send.init_window_sz(),
                    actions.recv.init_window_sz(),
                ),
            );
            child_stream.state.reserve_local()?;
            child_stream.is_pending_push = true;
            child_stream.key()
        };

        let pushed = {
            let mut stream = me.store.resolve(self.opaque.key);

            let frame = crate::server::Peer::convert_push_message(stream.id, promised_id, request)?;

            actions
                .send
                .send_push_promise(frame, send_buffer, &mut stream, &mut actions.task)
        };

        if let Err(err) = pushed {
            let mut child_stream = me.store.resolve(child_key);
            child_stream.unlink();
            child_stream.remove();
            return Err(err);
        }

        me.refs += 1;
        let opaque =
            OpaqueStreamRef::new(self.opaque.inner.clone(), &mut me.store.resolve(child_key));

        Ok(StreamRef {
            opaque,
            send_buffer: self.send_buffer.clone(),
        })
    }

    /// Called by the server after the stream is accepted. Given that clients
    /// initialize streams by sending HEADERS, the request will always be
    /// available.
    ///
    /// # Panics
    ///
    /// This function panics if the request isn't present.
    pub fn take_request(&self) -> Request<()> {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.opaque.key);
        me.actions.recv.take_request(&mut stream)
    }

    /// Called by a client to see if the current stream is pending open
    pub fn is_pending_open(&self) -> bool {
        let mut me = self.opaque.inner.lock().unwrap();
        me.store.resolve(self.opaque.key).is_pending_open
    }

    /// Request capacity to send data
    pub fn reserve_capacity(&mut self, capacity: WindowSize) {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.opaque.key);

        me.actions
            .send
            .reserve_capacity(capacity, &mut stream, &mut me.counts)
    }

    /// Returns the stream's current send capacity.
    pub fn capacity(&self) -> WindowSize {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.opaque.key);

        me.actions.send.capacity(&mut stream)
    }

    /// Request to be notified when the stream's capacity increases
    pub fn poll_capacity(&mut self, cx: &Context) -> Poll<Option<Result<WindowSize, UserError>>> {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.opaque.key);

        me.actions.send.poll_capacity(cx, &mut stream)
    }

    /// Request to be notified if a `RST_STREAM` is received for this stream.
    pub(crate) fn poll_reset(
        &mut self,
        cx: &Context,
        mode: proto::PollReset,
    ) -> Poll<Result<Reason, crate::Error>> {
        let mut me = self.opaque.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.opaque.key);

        me.actions.send.poll_reset(cx, &mut stream, mode)
    }

    pub fn clone_to_opaque(&self) -> OpaqueStreamRef {
        self.opaque.clone()
    }

    pub fn stream_id(&self) -> StreamId {
        self.opaque.stream_id()
    }
}
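
// Hypothetical server-side flow (the method calls below exist in this file;
// the surrounding glue is assumed): after `Streams::next_incoming` yields a
// `StreamRef`, a handler might do
//
//     let request = stream_ref.take_request();
//     stream_ref.send_response(response, false)?;
//     stream_ref.send_data(body_chunk, true)?;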

impl<B> Clone for StreamRef<B> {
    fn clone(&self) -> Self {
        StreamRef {
            opaque: self.opaque.clone(),
            send_buffer: self.send_buffer.clone(),
        }
    }
}

// ===== impl OpaqueStreamRef =====

impl OpaqueStreamRef {
    fn new(inner: Arc<Mutex<Inner>>, stream: &mut store::Ptr) -> OpaqueStreamRef {
        stream.ref_inc();
        OpaqueStreamRef {
            inner,
            key: stream.key(),
        }
    }
    /// Called by a client to check for a received response.
    pub fn poll_response(&mut self, cx: &Context) -> Poll<Result<Response<()>, proto::Error>> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.key);

        me.actions.recv.poll_response(cx, &mut stream)
    }
    /// Called by a client to check for a pushed request.
    pub fn poll_pushed(
        &mut self,
        cx: &Context,
    ) -> Poll<Option<Result<(Request<()>, OpaqueStreamRef), proto::Error>>> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.key);
        me.actions
            .recv
            .poll_pushed(cx, &mut stream)
            .map_ok(|(h, key)| {
                me.refs += 1;
                let opaque_ref =
                    OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key));
                (h, opaque_ref)
            })
    }

    pub fn is_end_stream(&self) -> bool {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let stream = me.store.resolve(self.key);

        me.actions.recv.is_end_stream(&stream)
    }

    pub fn poll_data(&mut self, cx: &Context) -> Poll<Option<Result<Bytes, proto::Error>>> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.key);

        me.actions.recv.poll_data(cx, &mut stream)
    }

    pub fn poll_trailers(&mut self, cx: &Context) -> Poll<Option<Result<HeaderMap, proto::Error>>> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.key);

        me.actions.recv.poll_trailers(cx, &mut stream)
    }

    pub(crate) fn available_recv_capacity(&self) -> isize {
        let me = self.inner.lock().unwrap();
        let me = &*me;

        let stream = &me.store[self.key];
        stream.recv_flow.available().into()
    }

    pub(crate) fn used_recv_capacity(&self) -> WindowSize {
        let me = self.inner.lock().unwrap();
        let me = &*me;

        let stream = &me.store[self.key];
        stream.in_flight_recv_data
    }

    /// Releases recv capacity back to the peer. This may result in sending
    /// WINDOW_UPDATE frames on both the stream and connection.
    pub fn release_capacity(&mut self, capacity: WindowSize) -> Result<(), UserError> {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.key);

        me.actions
            .recv
            .release_capacity(capacity, &mut stream, &mut me.actions.task)
    }

    /// Clear the receive queue and set the status to no longer receive data frames.
    pub(crate) fn clear_recv_buffer(&mut self) {
        let mut me = self.inner.lock().unwrap();
        let me = &mut *me;

        let mut stream = me.store.resolve(self.key);
        stream.is_recv = false;
        me.actions.recv.clear_recv_buffer(&mut stream);
    }

    pub fn stream_id(&self) -> StreamId {
        self.inner.lock().unwrap().store[self.key].id
    }
}

impl fmt::Debug for OpaqueStreamRef {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        use std::sync::TryLockError::*;

        match self.inner.try_lock() {
            Ok(me) => {
                let stream = &me.store[self.key];
                fmt.debug_struct("OpaqueStreamRef")
                    .field("stream_id", &stream.id)
                    .field("ref_count", &stream.ref_count)
                    .finish()
            }
            Err(Poisoned(_)) => fmt
                .debug_struct("OpaqueStreamRef")
                .field("inner", &"<Poisoned>")
                .finish(),
            Err(WouldBlock) => fmt
                .debug_struct("OpaqueStreamRef")
                .field("inner", &"<Locked>")
                .finish(),
        }
    }
}

impl Clone for OpaqueStreamRef {
    fn clone(&self) -> Self {
        // Increment the ref count
        let mut inner = self.inner.lock().unwrap();
        inner.store.resolve(self.key).ref_inc();
        inner.refs += 1;

        OpaqueStreamRef {
            inner: self.inner.clone(),
            key: self.key,
        }
    }
}

impl Drop for OpaqueStreamRef {
    fn drop(&mut self) {
        drop_stream_ref(&self.inner, self.key);
    }
}

// TODO: Move back in fn above
fn drop_stream_ref(inner: &Mutex<Inner>, key: store::Key) {
    let mut me = match inner.lock() {
        Ok(inner) => inner,
        Err(_) => {
            if ::std::thread::panicking() {
                tracing::trace!("StreamRef::drop; mutex poisoned");
                return;
            } else {
                panic!("StreamRef::drop; mutex poisoned");
            }
        }
    };

    let me = &mut *me;
    me.refs -= 1;
    let mut stream = me.store.resolve(key);

    tracing::trace!("drop_stream_ref; stream={:?}", stream);

    // decrement the stream's ref count by 1.
    stream.ref_dec();

    let actions = &mut me.actions;

    // If the stream is no longer referenced and is already closed (so it does
    // not have to go through the cancellation logic below), notify the
    // connection task so that it can close properly.
    if stream.ref_count == 0 && stream.is_closed() {
        if let Some(task) = actions.task.take() {
            task.wake();
        }
    }

    me.counts.transition(stream, |counts, stream| {
        maybe_cancel(stream, actions, counts);

        if stream.ref_count == 0 {
            // Release any recv window back to the connection; no one can
            // access it anymore.
            actions
                .recv
                .release_closed_capacity(stream, &mut actions.task);

            // We won't be able to reach our push promises anymore
            let mut ppp = stream.pending_push_promises.take();
            while let Some(promise) = ppp.pop(stream.store_mut()) {
                counts.transition(promise, |counts, stream| {
                    maybe_cancel(stream, actions, counts);
                });
            }
        }
    });
}

fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) {
    if stream.is_canceled_interest() {
        // A server is allowed to respond early without fully consuming the
        // client's input stream, but per the RFC it must then send a
        // RST_STREAM(NO_ERROR): https://www.rfc-editor.org/rfc/rfc7540#section-8.1
        // Some other HTTP/2 implementations may treat other error codes as
        // fatal if this is not respected (e.g. nginx:
        // https://trac.nginx.org/nginx/ticket/2376).
        let reason = if counts.peer().is_server()
            && stream.state.is_send_closed()
            && stream.state.is_recv_streaming()
        {
            Reason::NO_ERROR
        } else {
            Reason::CANCEL
        };

        actions
            .send
            .schedule_implicit_reset(stream, reason, counts, &mut actions.task);
        actions.recv.enqueue_reset_expiration(stream, counts);
    }
}

// ===== impl SendBuffer =====

impl<B> SendBuffer<B> {
    fn new() -> Self {
        let inner = Mutex::new(Buffer::new());
        SendBuffer { inner }
    }

    pub fn is_empty(&self) -> bool {
        let buf = self.inner.lock().unwrap();
        buf.is_empty()
    }
}

// ===== impl Actions =====

impl Actions {
    fn send_reset<B>(
        &mut self,
        stream: store::Ptr,
        reason: Reason,
        initiator: Initiator,
        counts: &mut Counts,
        send_buffer: &mut Buffer<Frame<B>>,
    ) {
        counts.transition(stream, |counts, stream| {
            self.send.send_reset(
                reason,
                initiator,
                send_buffer,
                stream,
                counts,
                &mut self.task,
            );
            self.recv.enqueue_reset_expiration(stream, counts);
            // if a RecvStream is parked, ensure it's notified
            stream.notify_recv();
        });
    }

    fn reset_on_recv_stream_err<B>(
        &mut self,
        buffer: &mut Buffer<Frame<B>>,
        stream: &mut store::Ptr,
        counts: &mut Counts,
        res: Result<(), Error>,
    ) -> Result<(), Error> {
        if let Err(Error::Reset(stream_id, reason, initiator)) = res {
            debug_assert_eq!(stream_id, stream.id);

            if counts.can_inc_num_local_error_resets() {
                counts.inc_num_local_error_resets();

                // Reset the stream.
                self.send
                    .send_reset(reason, initiator, buffer, stream, counts, &mut self.task);
                self.recv.enqueue_reset_expiration(stream, counts);
                // if a RecvStream is parked, ensure it's notified
                stream.notify_recv();
                Ok(())
            } else {
                tracing::warn!(
                    "reset_on_recv_stream_err; locally-reset streams reached limit ({:?})",
                    counts.max_local_error_resets().unwrap(),
                );
                Err(Error::library_go_away_data(
                    Reason::ENHANCE_YOUR_CALM,
                    "too_many_internal_resets",
                ))
            }
        } else {
            res
        }
    }
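
    // Note (the rationale is an assumption, not stated in this file):
    // bounding locally-initiated error resets above protects against a peer
    // that repeatedly provokes stream errors; once the limit is reached the
    // whole connection is torn down with ENHANCE_YOUR_CALM instead of
    // resetting streams indefinitely.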

    fn ensure_not_idle(&mut self, peer: peer::Dyn, id: StreamId) -> Result<(), Reason> {
        if peer.is_local_init(id) {
            self.send.ensure_not_idle(id)
        } else {
            self.recv.ensure_not_idle(id)
        }
    }

    fn ensure_no_conn_error(&self) -> Result<(), proto::Error> {
        if let Some(ref err) = self.conn_error {
            Err(err.clone())
        } else {
            Ok(())
        }
    }

    /// Check if we possibly could have processed and since forgotten this stream.
    ///
    /// If we send a RST_STREAM for a stream, we will eventually "forget" about
    /// the stream to free up memory. It's possible that the remote peer had
    /// frames in-flight, and by the time we receive them, our own state is
    /// gone. We *could* tear everything down by sending a GOAWAY, but it
    /// is more likely to be latency/memory constraints that caused this,
    /// and not a bad actor. So, to be less catastrophic, the spec allows
    /// us to send another RST_STREAM of STREAM_CLOSED.
    fn may_have_forgotten_stream(&self, peer: peer::Dyn, id: StreamId) -> bool {
        if id.is_zero() {
            return false;
        }
        if peer.is_local_init(id) {
            self.send.may_have_created_stream(id)
        } else {
            self.recv.may_have_created_stream(id)
        }
    }

    fn clear_queues(&mut self, clear_pending_accept: bool, store: &mut Store, counts: &mut Counts) {
        self.recv.clear_queues(clear_pending_accept, store, counts);
        self.send.clear_queues(store, counts);
    }
1635}