rustls/conn/mod.rs
use alloc::boxed::Box;
use core::fmt::{self, Debug};
use core::mem;
use core::ops::{Deref, DerefMut, Range};
#[cfg(feature = "std")]
use std::io;

use kernel::KernelConnection;

use crate::common_state::{CommonState, DEFAULT_BUFFER_LIMIT, IoState, State};
use crate::crypto::cipher::{Decrypted, EncodedMessage};
use crate::enums::{ContentType, ProtocolVersion};
use crate::error::{ApiMisuse, Error, PeerMisbehaved};
use crate::msgs::deframer::{
    BufferProgress, DeframerIter, DeframerVecBuffer, Delocator, HandshakeDeframer, Locator,
};
use crate::msgs::handshake::Random;
#[cfg(feature = "std")]
use crate::msgs::message::Message;
use crate::suites::ExtractedSecrets;
use crate::vecbuf::ChunkVecBuffer;

// pub so that it can be re-exported from the crate root
pub mod kernel;
pub(crate) mod unbuffered;

#[cfg(feature = "std")]
mod connection {
    use alloc::vec::Vec;
    use core::fmt::Debug;
    use core::ops::{Deref, DerefMut};
    use std::io::{self, BufRead, Read};

    use crate::common_state::{CommonState, IoState};
    use crate::conn::{ConnectionCommon, KeyingMaterialExporter, SideData};
    use crate::crypto::cipher::OutboundPlain;
    use crate::error::Error;
    use crate::suites::ExtractedSecrets;
    use crate::vecbuf::ChunkVecBuffer;

    /// A client or server connection.
    #[expect(clippy::exhaustive_enums)]
    #[derive(Debug)]
    pub enum Connection {
        /// A client connection
        Client(crate::client::ClientConnection),
        /// A server connection
        Server(crate::server::ServerConnection),
    }

    impl Connection {
        /// Read TLS content from `rd`.
        ///
        /// See [`ConnectionCommon::read_tls()`] for more information.
        pub fn read_tls(&mut self, rd: &mut dyn Read) -> Result<usize, io::Error> {
            match self {
                Self::Client(conn) => conn.read_tls(rd),
                Self::Server(conn) => conn.read_tls(rd),
            }
        }

        /// Writes TLS messages to `wr`.
        ///
        /// See [`ConnectionCommon::write_tls()`] for more information.
        pub fn write_tls(&mut self, wr: &mut dyn io::Write) -> Result<usize, io::Error> {
            self.sendable_tls.write_to(wr)
        }

        /// Returns an object that allows reading plaintext.
        pub fn reader(&mut self) -> Reader<'_> {
            match self {
                Self::Client(conn) => conn.reader(),
                Self::Server(conn) => conn.reader(),
            }
        }

        /// Returns an object that allows writing plaintext.
        pub fn writer(&mut self) -> Writer<'_> {
            match self {
                Self::Client(conn) => Writer::new(&mut **conn),
                Self::Server(conn) => Writer::new(&mut **conn),
            }
        }

        /// Processes any new packets read by a previous call to [`Connection::read_tls`].
        ///
        /// See [`ConnectionCommon::process_new_packets()`] for more information.
        pub fn process_new_packets(&mut self) -> Result<IoState, Error> {
            match self {
                Self::Client(conn) => conn.process_new_packets(),
                Self::Server(conn) => conn.process_new_packets(),
            }
        }

        /// Returns an object that can derive key material from the agreed connection secrets.
        ///
        /// See [`ConnectionCommon::exporter()`] for more information.
        pub fn exporter(&mut self) -> Result<KeyingMaterialExporter, Error> {
            match self {
                Self::Client(conn) => conn.exporter(),
                Self::Server(conn) => conn.exporter(),
            }
        }

        /// This function uses `io` to complete any outstanding IO for this connection.
        ///
        /// See [`ConnectionCommon::complete_io()`] for more information.
        pub fn complete_io(
            &mut self,
            io: &mut (impl Read + io::Write),
        ) -> Result<(usize, usize), io::Error> {
            match self {
                Self::Client(conn) => conn.complete_io(io),
                Self::Server(conn) => conn.complete_io(io),
            }
        }

        /// Extract secrets, so they can be used when configuring kTLS, for example.
        /// Should be used with care as it exposes secret key material.
        pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
            match self {
                Self::Client(client) => client.dangerous_extract_secrets(),
                Self::Server(server) => server.dangerous_extract_secrets(),
            }
        }

        /// Sets a limit on the internal buffers
        ///
        /// See [`ConnectionCommon::set_buffer_limit()`] for more information.
        pub fn set_buffer_limit(&mut self, limit: Option<usize>) {
            match self {
                Self::Client(client) => client.set_buffer_limit(limit),
                Self::Server(server) => server.set_buffer_limit(limit),
            }
        }

        /// Sets a limit on the internal plaintext buffer.
        ///
        /// See [`ConnectionCommon::set_plaintext_buffer_limit()`] for more information.
        pub fn set_plaintext_buffer_limit(&mut self, limit: Option<usize>) {
            match self {
                Self::Client(client) => client.set_plaintext_buffer_limit(limit),
                Self::Server(server) => server.set_plaintext_buffer_limit(limit),
            }
        }

        /// Sends a TLS1.3 `key_update` message to refresh a connection's keys
        ///
        /// See [`ConnectionCommon::refresh_traffic_keys()`] for more information.
        pub fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
            match self {
                Self::Client(client) => client.refresh_traffic_keys(),
                Self::Server(server) => server.refresh_traffic_keys(),
            }
        }
    }

    impl Deref for Connection {
        type Target = CommonState;

        fn deref(&self) -> &Self::Target {
            match self {
                Self::Client(conn) => &conn.core.common_state,
                Self::Server(conn) => &conn.core.common_state,
            }
        }
    }

    impl DerefMut for Connection {
        fn deref_mut(&mut self) -> &mut Self::Target {
            match self {
                Self::Client(conn) => &mut conn.core.common_state,
                Self::Server(conn) => &mut conn.core.common_state,
            }
        }
    }

    /// A structure that implements [`std::io::Read`] for reading plaintext.
    pub struct Reader<'a> {
        pub(super) received_plaintext: &'a mut ChunkVecBuffer,
        pub(super) has_received_close_notify: bool,
        pub(super) has_seen_eof: bool,
    }

    impl<'a> Reader<'a> {
        /// Check the connection's state if no bytes are available for reading.
        fn check_no_bytes_state(&self) -> io::Result<()> {
            match (self.has_received_close_notify, self.has_seen_eof) {
                // cleanly closed; don't care about TCP EOF: express this as Ok(0)
                (true, _) => Ok(()),
                // unclean closure
                (false, true) => Err(io::Error::new(
                    io::ErrorKind::UnexpectedEof,
                    UNEXPECTED_EOF_MESSAGE,
                )),
                // connection still going, but needs more data: signal `WouldBlock` so that
                // the caller knows this
                (false, false) => Err(io::ErrorKind::WouldBlock.into()),
            }
        }

        /// Obtain a chunk of plaintext data received from the peer over this TLS connection.
        ///
        /// This method consumes `self` so that it can return a slice whose lifetime is bounded by
        /// the [`ConnectionCommon`] that created this `Reader`.
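        ///
        /// A rough zero-copy sketch (assuming `conn` is an established
        /// `rustls::ClientConnection`; error handling is abbreviated):
        ///
        /// ```no_run
        /// # use std::io::BufRead;
        /// # fn demo(conn: &mut rustls::ClientConnection) -> std::io::Result<()> {
        /// let chunk: &[u8] = conn.reader().into_first_chunk()?;
        /// let used = chunk.len(); // ... parse some prefix of `chunk` here ...
        /// // Tell rustls how much of that chunk was consumed.
        /// conn.reader().consume(used);
        /// # Ok(())
        /// # }
        /// ```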
        pub fn into_first_chunk(self) -> io::Result<&'a [u8]> {
            match self.received_plaintext.chunk() {
                Some(chunk) => Ok(chunk),
                None => {
                    self.check_no_bytes_state()?;
                    Ok(&[])
                }
            }
        }
    }

    impl Read for Reader<'_> {
        /// Obtain plaintext data received from the peer over this TLS connection.
        ///
        /// If the peer closes the TLS session cleanly, this returns `Ok(0)` once all
        /// the pending data has been read. No further data can be received on that
        /// connection, so the underlying TCP connection should be half-closed too.
        ///
        /// If the peer closes the TLS session uncleanly (a TCP EOF without sending a
        /// `close_notify` alert) this function returns a `std::io::Error` of type
        /// `ErrorKind::UnexpectedEof` once any pending data has been read.
        ///
        /// Note that support for `close_notify` varies in peer TLS libraries: many do not
        /// support it and uncleanly close the TCP connection (this might be
        /// vulnerable to truncation attacks depending on the application protocol).
        /// This means applications using rustls must both handle EOF
        /// from this function, *and* unexpected EOF of the underlying TCP connection.
        ///
        /// If there are no bytes to read, this returns `Err(ErrorKind::WouldBlock.into())`.
        ///
        /// You may learn the number of bytes available at any time by inspecting
        /// the return of [`Connection::process_new_packets`].
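        ///
        /// A loose sketch of the resulting error handling (assuming `conn` is an
        /// established `rustls::ClientConnection`):
        ///
        /// ```no_run
        /// # use std::io::{self, Read};
        /// # fn demo(conn: &mut rustls::ClientConnection) -> io::Result<()> {
        /// let mut buf = [0u8; 4096];
        /// match conn.reader().read(&mut buf) {
        ///     Ok(0) => {} // clean TLS closure: no more application data will arrive
        ///     Ok(n) => println!("{n} plaintext bytes are now in buf"),
        ///     Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
        ///         // nothing to read yet: call read_tls()/process_new_packets() first
        ///     }
        ///     Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => {
        ///         // peer closed without close_notify: possible truncation
        ///     }
        ///     Err(e) => return Err(e),
        /// }
        /// # Ok(())
        /// # }
        /// ```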
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            let len = self.received_plaintext.read(buf)?;
            if len > 0 || buf.is_empty() {
                return Ok(len);
            }

            self.check_no_bytes_state()
                .map(|()| len)
        }
    }

    impl BufRead for Reader<'_> {
        /// Obtain a chunk of plaintext data received from the peer over this TLS connection.
        /// This reads the same data as [`Reader::read()`], but returns a reference instead of
        /// copying the data.
        ///
        /// The caller should call [`Reader::consume()`] afterward to advance the buffer.
        ///
        /// See [`Reader::into_first_chunk()`] for a version of this function that returns a
        /// buffer with a longer lifetime.
        fn fill_buf(&mut self) -> io::Result<&[u8]> {
            Reader {
                // reborrow
                received_plaintext: self.received_plaintext,
                ..*self
            }
            .into_first_chunk()
        }

        fn consume(&mut self, amt: usize) {
            self.received_plaintext
                .consume_first_chunk(amt)
        }
    }

    const UNEXPECTED_EOF_MESSAGE: &str = "peer closed connection without sending TLS close_notify: \
https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof";

    /// A structure that implements [`std::io::Write`] for writing plaintext.
    pub struct Writer<'a> {
        sink: &'a mut dyn PlaintextSink,
    }

    impl<'a> Writer<'a> {
        /// Create a new Writer.
        ///
        /// This is not an external interface. Get one of these objects
        /// from [`Connection::writer`].
        pub(crate) fn new(sink: &'a mut dyn PlaintextSink) -> Self {
            Writer { sink }
        }
    }

    impl io::Write for Writer<'_> {
        /// Send the plaintext `buf` to the peer, encrypting
        /// and authenticating it. Once this function succeeds
        /// you should call [`Connection::write_tls`] which will output the
        /// corresponding TLS records.
        ///
        /// This function buffers plaintext sent before the
        /// TLS handshake completes, and sends it as soon
        /// as it can. See [`ConnectionCommon::set_buffer_limit`] to control
        /// the size of this buffer.
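        ///
        /// A minimal sketch of the write path (assuming `conn` is an established
        /// `rustls::ClientConnection` and `sock` a connected `TcpStream`):
        ///
        /// ```no_run
        /// # use std::io::Write;
        /// # fn demo(conn: &mut rustls::ClientConnection, sock: &mut std::net::TcpStream) -> std::io::Result<()> {
        /// conn.writer().write_all(b"GET / HTTP/1.1\r\n\r\n")?;
        /// // The plaintext is now encrypted and buffered; emit the TLS records:
        /// while conn.wants_write() {
        ///     conn.write_tls(sock)?;
        /// }
        /// # Ok(())
        /// # }
        /// ```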
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            self.sink.write(buf)
        }

        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
            self.sink.write_vectored(bufs)
        }

        fn flush(&mut self) -> io::Result<()> {
            self.sink.flush()
        }
    }

    /// Internal trait implemented by the [`ServerConnection`]/[`ClientConnection`]
    /// allowing them to be the subject of a [`Writer`].
    ///
    /// [`ServerConnection`]: crate::ServerConnection
    /// [`ClientConnection`]: crate::ClientConnection
    pub(crate) trait PlaintextSink {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize>;
        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize>;
        fn flush(&mut self) -> io::Result<()>;
    }

    impl<Side: SideData> PlaintextSink for ConnectionCommon<Side> {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            let len = self
                .core
                .common_state
                .buffer_plaintext(buf.into(), &mut self.sendable_plaintext);
            self.core.maybe_refresh_traffic_keys();
            Ok(len)
        }

        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
            let payload_owner: Vec<&[u8]>;
            let payload = match bufs.len() {
                0 => return Ok(0),
                1 => OutboundPlain::Single(bufs[0].deref()),
                _ => {
                    payload_owner = bufs
                        .iter()
                        .map(|io_slice| io_slice.deref())
                        .collect();

                    OutboundPlain::new(&payload_owner)
                }
            };
            let len = self
                .core
                .common_state
                .buffer_plaintext(payload, &mut self.sendable_plaintext);
            self.core.maybe_refresh_traffic_keys();
            Ok(len)
        }

        fn flush(&mut self) -> io::Result<()> {
            Ok(())
        }
    }
}

#[cfg(feature = "std")]
pub use connection::{Connection, Reader, Writer};

/// An object of this type can export keying material.
pub struct KeyingMaterialExporter {
    pub(crate) inner: Box<dyn Exporter>,
}

impl KeyingMaterialExporter {
    /// Derives key material from the agreed connection secrets.
    ///
    /// This function fills in `output` with `output.len()` bytes of key
    /// material derived from a master connection secret using `label`
    /// and `context` for diversification. Ownership of the buffer is taken
    /// by the function and returned via the Ok result to ensure no key
    /// material leaks if the function fails.
    ///
    /// See [RFC5705][] for more details on what this does and is for. In
    /// other libraries this is often named `SSL_export_keying_material()`
    /// or `SslExportKeyingMaterial()`.
    ///
    /// This function is not meaningful if `output.len()` is zero and will
    /// return an error in that case.
    ///
    /// [RFC5705]: https://datatracker.ietf.org/doc/html/rfc5705
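    ///
    /// A small sketch (assuming the handshake has completed; the label and context values
    /// here are purely illustrative -- real protocols define their own):
    ///
    /// ```no_run
    /// # fn demo(conn: &mut rustls::ClientConnection) -> Result<(), rustls::Error> {
    /// let exporter = conn.exporter()?;
    /// let key: [u8; 32] = exporter.derive(b"EXPORTER-example-label", Some(b"context"), [0u8; 32])?;
    /// # let _ = key;
    /// # Ok(())
    /// # }
    /// ```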
    pub fn derive<T: AsMut<[u8]>>(
        &self,
        label: &[u8],
        context: Option<&[u8]>,
        mut output: T,
    ) -> Result<T, Error> {
        if output.as_mut().is_empty() {
            return Err(ApiMisuse::ExporterOutputZeroLength.into());
        }

        self.inner
            .derive(label, context, output.as_mut())
            .map(|_| output)
    }
}

impl Debug for KeyingMaterialExporter {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("KeyingMaterialExporter")
            .finish_non_exhaustive()
    }
}

/// This trait is for any object that can export keying material.
///
/// The terminology comes from [RFC5705](https://datatracker.ietf.org/doc/html/rfc5705)
/// but doesn't really involve "exporting" key material (in the usual meaning of "export"
/// -- of moving an artifact from one domain to another) but is best thought of as key
/// diversification using an existing secret. That secret is implicit in this interface,
/// so is assumed to be held by `self`. The secret should be zeroized in `drop()`.
///
/// There are several such internal implementations, depending on the context
/// and protocol version.
pub(crate) trait Exporter: Send + Sync {
    /// Fills in `output` with derived keying material.
    ///
    /// This is deterministic depending on a base secret (implicit in `self`),
    /// plus the `label` and `context` values.
    ///
    /// Must fill in `output` entirely, or return an error.
    fn derive(&self, label: &[u8], context: Option<&[u8]>, output: &mut [u8]) -> Result<(), Error>;
}

#[derive(Debug)]
pub(crate) struct ConnectionRandoms {
    pub(crate) client: [u8; 32],
    pub(crate) server: [u8; 32],
}

impl ConnectionRandoms {
    pub(crate) fn new(client: Random, server: Random) -> Self {
        Self {
            client: client.0,
            server: server.0,
        }
    }
}

/// TLS connection state with side-specific data (`Side`).
///
/// This is one of the core abstractions of the rustls API. It represents a single connection
/// to a peer, and holds all the state associated with that connection. Note that it does
/// not hold any IO objects: the application is responsible for reading and writing TLS records.
/// If you want an object that does hold IO objects, see [`Stream`] and [`StreamOwned`].
///
/// This object is generic over the `Side` type parameter, which must implement the marker trait
/// [`SideData`]. This is used to store side-specific data.
///
/// [`Stream`]: crate::Stream
/// [`StreamOwned`]: crate::StreamOwned
pub struct ConnectionCommon<Side: SideData> {
    pub(crate) core: ConnectionCore<Side>,
    deframer_buffer: DeframerVecBuffer,
    pub(crate) sendable_plaintext: ChunkVecBuffer,
}

impl<Side: SideData> ConnectionCommon<Side> {
    /// Processes any new packets read by a previous call to
    /// [`Connection::read_tls`].
    ///
    /// Errors from this function relate to TLS protocol errors, and
    /// are fatal to the connection. Future calls after an error will do
    /// no new work and will return the same error. After an error is
    /// received from [`process_new_packets`], you should not call [`read_tls`]
    /// any more (it will fill up buffers to no purpose). However, you
    /// may call the other methods on the connection, including `write`,
    /// `send_close_notify`, and `write_tls`. Most likely you will want to
    /// call `write_tls` to send any alerts queued by the error and then
    /// close the underlying connection.
    ///
    /// Success from this function comes with some sundry state data
    /// about the connection.
    ///
    /// [`read_tls`]: Connection::read_tls
    /// [`process_new_packets`]: Connection::process_new_packets
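    ///
    /// A rough sketch of the usual buffered-IO cycle (assuming `conn` is a
    /// `rustls::ClientConnection` and `sock` a connected `TcpStream`):
    ///
    /// ```no_run
    /// # use std::io::Read;
    /// # fn demo(conn: &mut rustls::ClientConnection, sock: &mut std::net::TcpStream) -> Result<(), Box<dyn std::error::Error>> {
    /// conn.read_tls(sock)?;
    /// let io_state = conn.process_new_packets()?;
    /// if io_state.plaintext_bytes_to_read() > 0 {
    ///     let mut plaintext = vec![0u8; io_state.plaintext_bytes_to_read()];
    ///     conn.reader().read_exact(&mut plaintext)?;
    /// }
    /// # Ok(())
    /// # }
    /// ```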
    #[inline]
    pub fn process_new_packets(&mut self) -> Result<IoState, Error> {
        self.core
            .process_new_packets(&mut self.deframer_buffer, &mut self.sendable_plaintext)
    }

    /// Returns an object that can derive key material from the agreed connection secrets.
    ///
    /// See [RFC5705][] for more details on what this is for.
    ///
    /// This function can be called at most once per connection.
    ///
    /// This function will error:
    ///
    /// - if called prior to the handshake completing; (check with
    ///   [`CommonState::is_handshaking`] first).
    /// - if called more than once per connection.
    ///
    /// [RFC5705]: https://datatracker.ietf.org/doc/html/rfc5705
    pub fn exporter(&mut self) -> Result<KeyingMaterialExporter, Error> {
        self.core.exporter()
    }

    /// Extract secrets, so they can be used when configuring kTLS, for example.
    /// Should be used with care as it exposes secret key material.
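    ///
    /// A small sketch (assuming secret extraction was enabled on the config before the
    /// connection was built, and the handshake has completed):
    ///
    /// ```no_run
    /// # fn demo(conn: rustls::ClientConnection) -> Result<(), rustls::Error> {
    /// let secrets = conn.dangerous_extract_secrets()?;
    /// // `secrets.tx` / `secrets.rx` hold the per-direction sequence numbers and keys.
    /// # let _ = secrets;
    /// # Ok(())
    /// # }
    /// ```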
    pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        self.core.dangerous_extract_secrets()
    }

    /// Sets a limit on the internal buffers used to buffer
    /// unsent plaintext (prior to completing the TLS handshake)
    /// and unsent TLS records. This limit acts only on application
    /// data written through [`Connection::writer`].
    ///
    /// By default the limit is 64KB. The limit can be set
    /// at any time, even if the current buffer use is higher.
    ///
    /// [`None`] means no limit applies, and will mean that written
    /// data is buffered without bound -- it is up to the application
    /// to appropriately schedule its plaintext and TLS writes to bound
    /// memory usage.
    ///
    /// For illustration: `Some(1)` means a limit of one byte applies:
    /// [`Connection::writer`] will accept only one byte, encrypt it and
    /// add a TLS header. Once this is sent via [`Connection::write_tls`],
    /// another byte may be sent.
    ///
    /// # Internal write-direction buffering
    /// rustls has two buffers whose size are bounded by this setting:
    ///
    /// ## Buffering of unsent plaintext data prior to handshake completion
    ///
    /// Calls to [`Connection::writer`] before or during the handshake
    /// are buffered (up to the limit specified here). Once the
    /// handshake completes this data is encrypted and the resulting
    /// TLS records are added to the outgoing buffer.
    ///
    /// ## Buffering of outgoing TLS records
    ///
    /// This buffer is used to store TLS records that rustls needs to
    /// send to the peer. It is used in these two circumstances:
    ///
    /// - by [`Connection::process_new_packets`] when a handshake or alert
    ///   TLS record needs to be sent.
    /// - by [`Connection::writer`] post-handshake: the plaintext is
    ///   encrypted and the resulting TLS record is buffered.
    ///
    /// This buffer is emptied by [`Connection::write_tls`].
    ///
    /// [`Connection::writer`]: crate::Connection::writer
    /// [`Connection::write_tls`]: crate::Connection::write_tls
    /// [`Connection::process_new_packets`]: crate::Connection::process_new_packets
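    ///
    /// For example, to halve the default limit (a sketch; `conn` is any established
    /// connection):
    ///
    /// ```no_run
    /// # fn demo(conn: &mut rustls::ClientConnection) {
    /// conn.set_buffer_limit(Some(32 * 1024));
    /// # }
    /// ```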
    pub fn set_buffer_limit(&mut self, limit: Option<usize>) {
        self.sendable_plaintext.set_limit(limit);
        self.sendable_tls.set_limit(limit);
    }

    /// Sets a limit on the internal buffers used to buffer decoded plaintext.
    ///
    /// See [`Self::set_buffer_limit`] for more information on how limits are applied.
    pub fn set_plaintext_buffer_limit(&mut self, limit: Option<usize>) {
        self.core
            .common_state
            .received_plaintext
            .set_limit(limit);
    }

    /// Sends a TLS1.3 `key_update` message to refresh a connection's keys.
    ///
    /// This call refreshes our encryption keys. Once the peer receives the message,
    /// it refreshes _its_ encryption and decryption keys and sends a response.
    /// Once we receive that response, we refresh our decryption keys to match.
    /// At the end of this process, keys in both directions have been refreshed.
    ///
    /// Note that this process does not happen synchronously: this call just
    /// arranges that the `key_update` message will be included in the next
    /// `write_tls` output.
    ///
    /// This fails with `Error::HandshakeNotComplete` if called before the initial
    /// handshake is complete, or if a version prior to TLS1.3 is negotiated.
    ///
    /// # Usage advice
    /// Note that other implementations (including rustls) may enforce limits on
    /// the number of `key_update` messages allowed on a given connection to prevent
    /// denial of service. Therefore, this should be called sparingly.
    ///
    /// rustls implicitly and automatically refreshes traffic keys when needed
    /// according to the selected cipher suite's cryptographic constraints. There
    /// is therefore no need to call this manually to avoid cryptographic keys
    /// "wearing out".
    ///
    /// The main reason to call this manually is to roll keys when it is known
    /// a connection will be idle for a long period.
    pub fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
        self.core.refresh_traffic_keys()
    }
}

#[cfg(feature = "std")]
impl<Side: SideData> ConnectionCommon<Side> {
    /// Returns an object that allows reading plaintext.
    pub fn reader(&mut self) -> Reader<'_> {
        let common = &mut self.core.common_state;
        Reader {
            received_plaintext: &mut common.received_plaintext,
            // Are we done? i.e., have we processed all received messages, and received a
            // close_notify to indicate that no new messages will arrive?
            has_received_close_notify: common.has_received_close_notify,
            has_seen_eof: common.has_seen_eof,
        }
    }

    /// Returns an object that allows writing plaintext.
    pub fn writer(&mut self) -> Writer<'_> {
        Writer::new(self)
    }

    /// This function uses `io` to complete any outstanding IO for
    /// this connection.
    ///
    /// This is a convenience function which solely uses other parts
    /// of the public API.
    ///
    /// What this means depends on the connection state:
    ///
    /// - If the connection [`is_handshaking`], then IO is performed until
    ///   the handshake is complete.
    /// - Otherwise, if [`wants_write`] is true, [`write_tls`] is invoked
    ///   until it is all written.
    /// - Otherwise, if [`wants_read`] is true, [`read_tls`] is invoked
    ///   once.
    ///
    /// The return value is the number of bytes read from and written
    /// to `io`, respectively. Once both `read()` and `write()` yield `WouldBlock`,
    /// this function will propagate the error.
    ///
    /// Errors from TLS record handling (i.e., from [`process_new_packets`])
    /// are wrapped in an `io::ErrorKind::InvalidData`-kind error.
    ///
    /// [`is_handshaking`]: CommonState::is_handshaking
    /// [`wants_read`]: CommonState::wants_read
    /// [`wants_write`]: CommonState::wants_write
    /// [`write_tls`]: ConnectionCommon::write_tls
    /// [`read_tls`]: ConnectionCommon::read_tls
    /// [`process_new_packets`]: ConnectionCommon::process_new_packets
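    ///
    /// A blocking sketch of driving the handshake with this helper (assuming `conn` is a
    /// freshly created `rustls::ClientConnection` and `sock` a connected `TcpStream`):
    ///
    /// ```no_run
    /// # fn demo(conn: &mut rustls::ClientConnection, sock: &mut std::net::TcpStream) -> std::io::Result<()> {
    /// while conn.is_handshaking() {
    ///     let (rd, wr) = conn.complete_io(sock)?;
    ///     if rd == 0 && wr == 0 {
    ///         break; // peer closed the connection
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```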
    pub fn complete_io(
        &mut self,
        io: &mut (impl io::Read + io::Write),
    ) -> Result<(usize, usize), io::Error> {
        let mut eof = false;
        let mut wrlen = 0;
        let mut rdlen = 0;
        loop {
            let (mut blocked_write, mut blocked_read) = (None, None);
            let until_handshaked = self.is_handshaking();

            if !self.wants_write() && !self.wants_read() {
                // We will make no further progress.
                return Ok((rdlen, wrlen));
            }

            while self.wants_write() {
                match self.write_tls(io) {
                    Ok(0) => {
                        io.flush()?;
                        return Ok((rdlen, wrlen)); // EOF.
                    }
                    Ok(n) => wrlen += n,
                    Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                        blocked_write = Some(err);
                        break;
                    }
                    Err(err) => return Err(err),
                }
            }
            if wrlen > 0 {
                io.flush()?;
            }

            if !until_handshaked && wrlen > 0 {
                return Ok((rdlen, wrlen));
            }

            // If we want to write, but are WouldBlocked by the underlying IO, *and*
            // have no desire to read; that is everything.
            if let (Some(_), false) = (&blocked_write, self.wants_read()) {
                return match wrlen {
                    0 => Err(blocked_write.unwrap()),
                    _ => Ok((rdlen, wrlen)),
                };
            }

            while !eof && self.wants_read() {
                let read_size = match self.read_tls(io) {
                    Ok(0) => {
                        eof = true;
                        Some(0)
                    }
                    Ok(n) => {
                        rdlen += n;
                        Some(n)
                    }
                    Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                        blocked_read = Some(err);
                        break;
                    }
                    Err(err) if err.kind() == io::ErrorKind::Interrupted => None, // nothing to do
                    Err(err) => return Err(err),
                };
                if read_size.is_some() {
                    break;
                }
            }

            if let Err(e) = self.process_new_packets() {
                // In case we have an alert to send describing this error, try a last-gasp
                // write -- but don't predate the primary error.
                let _ignored = self.write_tls(io);
                let _ignored = io.flush();
                return Err(io::Error::new(io::ErrorKind::InvalidData, e));
            };

            // If we want to read, but are WouldBlocked by the underlying IO, *and*
            // have no desire to write; that is everything.
            if let (Some(_), false) = (&blocked_read, self.wants_write()) {
                return match rdlen {
                    0 => Err(blocked_read.unwrap()),
                    _ => Ok((rdlen, wrlen)),
                };
            }

            // if we're doing IO until handshaked, and we believe we've finished handshaking,
            // but process_new_packets() has queued TLS data to send, loop around again to write
            // the queued messages.
            if until_handshaked && !self.is_handshaking() && self.wants_write() {
                continue;
            }

            let blocked = blocked_write.zip(blocked_read);
            match (eof, until_handshaked, self.is_handshaking(), blocked) {
                (_, true, false, _) => return Ok((rdlen, wrlen)),
                (_, _, _, Some((e, _))) if rdlen == 0 && wrlen == 0 => return Err(e),
                (_, false, _, _) => return Ok((rdlen, wrlen)),
                (true, true, true, _) => return Err(io::Error::from(io::ErrorKind::UnexpectedEof)),
                _ => {}
            }
        }
    }

    /// Extract the first handshake message.
    ///
    /// This is a shortcut to the `process_new_packets()` -> `process_msg()` ->
    /// `process_handshake_messages()` path, specialized for the first handshake message.
    pub(crate) fn first_handshake_message(&mut self) -> Result<Option<Message<'static>>, Error> {
        let mut buffer_progress = self.core.hs_deframer.progress();

        let res = self
            .core
            .deframe(self.deframer_buffer.filled_mut(), &mut buffer_progress)
            .map(|opt| opt.map(|pm| Message::try_from(&pm).map(|m| m.into_owned())));

        match res? {
            Some(Ok(msg)) => {
                self.deframer_buffer
                    .discard(buffer_progress.take_discard());
                self.core.common_state.aligned_handshake = self.core.hs_deframer.aligned();
                Ok(Some(msg))
            }
            Some(Err(err)) => Err(err.into()),
            None => Ok(None),
        }
    }

    pub(crate) fn replace_state(&mut self, new: Box<dyn State<Side>>) {
        self.core.state = Ok(new);
    }

    /// Read TLS content from `rd` into the internal buffer.
    ///
    /// Due to the internal buffering, `rd` can supply TLS messages in arbitrary-sized chunks (like
    /// a socket or pipe might).
    ///
    /// You should call [`process_new_packets()`] each time a call to this function succeeds in order
    /// to empty the incoming TLS data buffer.
    ///
    /// This function returns `Ok(0)` when the underlying `rd` does so. This typically happens when
    /// a socket is cleanly closed, or a file is at EOF. Errors may result from the IO done through
    /// `rd`; additionally, errors of `ErrorKind::Other` are emitted to signal backpressure:
    ///
    /// * In order to empty the incoming TLS data buffer, you should call [`process_new_packets()`]
    ///   each time a call to this function succeeds.
    /// * In order to empty the incoming plaintext data buffer, you should empty it through
    ///   the [`reader()`] after the call to [`process_new_packets()`].
    ///
    /// This function also returns `Ok(0)` once a `close_notify` alert has been successfully
    /// received. No additional data is ever read in this state.
    ///
    /// [`process_new_packets()`]: ConnectionCommon::process_new_packets
    /// [`reader()`]: ConnectionCommon::reader
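    ///
    /// A sketch of a typical non-blocking read step (assuming `conn` is a
    /// `rustls::ClientConnection` and `sock` a connected `TcpStream`):
    ///
    /// ```no_run
    /// # use std::io;
    /// # fn demo(conn: &mut rustls::ClientConnection, sock: &mut std::net::TcpStream) -> io::Result<()> {
    /// match conn.read_tls(sock) {
    ///     Ok(0) => {} // clean EOF from the socket (or close_notify already processed)
    ///     Ok(_) => {
    ///         conn.process_new_packets()
    ///             .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
    ///     }
    ///     Err(e) if e.kind() == io::ErrorKind::WouldBlock => {} // try again later
    ///     Err(e) => return Err(e),
    /// }
    /// # Ok(())
    /// # }
    /// ```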
    pub fn read_tls(&mut self, rd: &mut dyn io::Read) -> Result<usize, io::Error> {
        if self.received_plaintext.is_full() {
            return Err(io::Error::other("received plaintext buffer full"));
        }

        if self.has_received_close_notify {
            return Ok(0);
        }

        let res = self
            .deframer_buffer
            .read(rd, self.core.hs_deframer.is_active());
        if let Ok(0) = res {
            self.has_seen_eof = true;
        }
        res
    }

    /// Writes TLS messages to `wr`.
    ///
    /// On success, this function returns `Ok(n)` where `n` is a number of bytes written to `wr`
    /// (after encoding and encryption).
    ///
    /// After this function returns, the connection buffer may not yet be fully flushed. The
    /// [`CommonState::wants_write`] function can be used to check if the output buffer is empty.
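    ///
    /// A short sketch of draining the output buffer (assuming `conn` and `sock` as in the
    /// other examples):
    ///
    /// ```no_run
    /// # fn demo(conn: &mut rustls::ClientConnection, sock: &mut std::net::TcpStream) -> std::io::Result<()> {
    /// while conn.wants_write() {
    ///     conn.write_tls(sock)?;
    /// }
    /// # Ok(())
    /// # }
    /// ```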
    pub fn write_tls(&mut self, wr: &mut dyn io::Write) -> Result<usize, io::Error> {
        self.sendable_tls.write_to(wr)
    }
}

impl<Side: SideData> Deref for ConnectionCommon<Side> {
    type Target = CommonState;

    fn deref(&self) -> &Self::Target {
        &self.core.common_state
    }
}

impl<Side: SideData> DerefMut for ConnectionCommon<Side> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.core.common_state
    }
}

impl<Side: SideData> From<ConnectionCore<Side>> for ConnectionCommon<Side> {
    fn from(core: ConnectionCore<Side>) -> Self {
        Self {
            core,
            deframer_buffer: DeframerVecBuffer::default(),
            sendable_plaintext: ChunkVecBuffer::new(Some(DEFAULT_BUFFER_LIMIT)),
        }
    }
}

/// Interface shared by unbuffered client and server connections.
pub struct UnbufferedConnectionCommon<Side: SideData> {
    pub(crate) core: ConnectionCore<Side>,
    wants_write: bool,
    emitted_peer_closed_state: bool,
}

impl<Side: SideData> From<ConnectionCore<Side>> for UnbufferedConnectionCommon<Side> {
    fn from(core: ConnectionCore<Side>) -> Self {
        Self {
            core,
            wants_write: false,
            emitted_peer_closed_state: false,
        }
    }
}

impl<Side: SideData> UnbufferedConnectionCommon<Side> {
    /// Extract secrets, so they can be used when configuring kTLS, for example.
    /// Should be used with care as it exposes secret key material.
    pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        self.core.dangerous_extract_secrets()
    }
}

impl<Side: SideData> Deref for UnbufferedConnectionCommon<Side> {
    type Target = CommonState;

    fn deref(&self) -> &Self::Target {
        &self.core.common_state
    }
}

pub(crate) struct ConnectionCore<Side: SideData> {
    pub(crate) state: Result<Box<dyn State<Side>>, Error>,
    pub(crate) side: Side,
    pub(crate) common_state: CommonState,
    pub(crate) hs_deframer: HandshakeDeframer,

    /// We limit consecutive empty fragments to avoid a route for the peer to send
    /// us significant but fruitless traffic.
    seen_consecutive_empty_fragments: u8,
}

impl<Side: SideData> ConnectionCore<Side> {
    pub(crate) fn new(state: Box<dyn State<Side>>, side: Side, common_state: CommonState) -> Self {
        Self {
            state: Ok(state),
            side,
            common_state,
            hs_deframer: HandshakeDeframer::default(),
            seen_consecutive_empty_fragments: 0,
        }
    }

    pub(crate) fn process_new_packets(
        &mut self,
        deframer_buffer: &mut DeframerVecBuffer,
        sendable_plaintext: &mut ChunkVecBuffer,
    ) -> Result<IoState, Error> {
        let mut state = match mem::replace(&mut self.state, Err(Error::HandshakeNotComplete)) {
            Ok(state) => state,
            Err(e) => {
                self.state = Err(e.clone());
                return Err(e);
            }
        };

        // Should `EncodedMessage<Payload>` resolve to plaintext application
        // data it will be allocated within `plaintext` and written to
        // `CommonState.received_plaintext` buffer.
        //
        // TODO `CommonState.received_plaintext` should be hoisted into
        // `ConnectionCommon`
        let mut plaintext = None;
        let mut buffer_progress = self.hs_deframer.progress();

        loop {
            let buffer = deframer_buffer.filled_mut();
            let locator = Locator::new(buffer);
            let res = self.deframe(buffer, &mut buffer_progress);

            let opt_msg = match res {
                Ok(opt_msg) => opt_msg,
                Err(e) => {
                    self.common_state
                        .maybe_send_fatal_alert(&e);
                    if let Error::DecryptError = e {
                        state.handle_decrypt_error();
                    }
                    self.state = Err(e.clone());
                    deframer_buffer.discard(buffer_progress.take_discard());
                    return Err(e);
                }
            };

            let Some(msg) = opt_msg else {
                break;
            };

            match self.common_state.process_main_protocol(
                msg,
                state,
                &mut self.side,
                &locator,
                &mut plaintext,
                Some(sendable_plaintext),
            ) {
                Ok(new) => state = new,
                Err(e) => {
                    self.common_state
                        .maybe_send_fatal_alert(&e);
                    self.state = Err(e.clone());
                    deframer_buffer.discard(buffer_progress.take_discard());
                    return Err(e);
                }
            }

            if self
                .common_state
                .has_received_close_notify
            {
                // "Any data received after a closure alert has been received MUST be ignored."
                // -- <https://datatracker.ietf.org/doc/html/rfc8446#section-6.1>
                // This is data that has already been accepted in `read_tls`.
                buffer_progress.add_discard(deframer_buffer.filled().len());
                break;
            }

            if let Some(payload) = plaintext.take() {
                let payload = payload.reborrow(&Delocator::new(buffer));
                self.common_state
                    .received_plaintext
                    .append(payload.into_vec());
            }

            deframer_buffer.discard(buffer_progress.take_discard());
        }

        deframer_buffer.discard(buffer_progress.take_discard());
        self.state = Ok(state);
        Ok(self.common_state.current_io_state())
    }

    /// Pull a message out of the deframer and send any messages that need to be sent as a result.
    fn deframe<'b>(
        &mut self,
        buffer: &'b mut [u8],
        buffer_progress: &mut BufferProgress,
    ) -> Result<Option<EncodedMessage<&'b [u8]>>, Error> {
        // before processing any more of `buffer`, return any extant messages from `hs_deframer`
        if self.hs_deframer.has_message_ready() {
            Ok(self.take_handshake_message(buffer, buffer_progress))
        } else {
            self.process_more_input(buffer, buffer_progress)
        }
    }

    fn take_handshake_message<'b>(
        &mut self,
        buffer: &'b mut [u8],
        buffer_progress: &mut BufferProgress,
    ) -> Option<EncodedMessage<&'b [u8]>> {
        self.hs_deframer
            .iter(buffer)
            .next()
            .map(|(message, discard)| {
                buffer_progress.add_discard(discard);
                message
            })
    }

    fn process_more_input<'b>(
        &mut self,
        buffer: &'b mut [u8],
        buffer_progress: &mut BufferProgress,
    ) -> Result<Option<EncodedMessage<&'b [u8]>>, Error> {
        let version_is_tls13 = matches!(
            self.common_state.negotiated_version,
            Some(ProtocolVersion::TLSv1_3)
        );

        let locator = Locator::new(buffer);

        loop {
            let mut iter = DeframerIter::new(&mut buffer[buffer_progress.processed()..]);

            let (message, processed) = loop {
                let message = match iter.next().transpose() {
                    Ok(Some(message)) => message,
                    Ok(None) => return Ok(None),
                    Err(err) => return Err(err),
                };

                let allowed_plaintext = match message.typ {
                    // CCS messages are always plaintext.
                    ContentType::ChangeCipherSpec => true,
                    // Alerts are allowed to be plaintext if-and-only-if:
                    // * The negotiated protocol version is TLS 1.3. - In TLS 1.2 it is unambiguous when
                    //   keying changes based on the CCS message. Only TLS 1.3 requires these heuristics.
                    // * We have not yet decrypted any messages from the peer - if we have we don't
                    //   expect any plaintext.
                    // * The payload size is indicative of a plaintext alert message.
                    ContentType::Alert
                        if version_is_tls13
                            && !self
                                .common_state
                                .record_layer
                                .has_decrypted()
                            && message.payload.len() <= 2 =>
                    {
                        true
                    }
                    // In other circumstances, we expect all messages to be encrypted.
                    _ => false,
                };

                if allowed_plaintext && !self.hs_deframer.is_active() {
                    break (message.into_plain_message(), iter.bytes_consumed());
                }

                let message = match self
                    .common_state
                    .record_layer
                    .decrypt_incoming(message)
                {
                    // failed decryption during trial decryption is not allowed to be
                    // interleaved with partial handshake data.
                    Ok(None) if self.hs_deframer.aligned().is_none() => {
                        return Err(
                            PeerMisbehaved::RejectedEarlyDataInterleavedWithHandshakeMessage.into(),
                        );
                    }

                    // failed decryption during trial decryption.
                    Ok(None) => continue,

                    Ok(Some(message)) => message,

                    Err(err) => return Err(err),
                };

                let Decrypted {
                    want_close_before_decrypt,
                    plaintext,
                } = message;

                if want_close_before_decrypt {
                    self.common_state.send_close_notify();
                }

                break (plaintext, iter.bytes_consumed());
            };

            if self.hs_deframer.aligned().is_none() && message.typ != ContentType::Handshake {
                // "Handshake messages MUST NOT be interleaved with other record
                // types. That is, if a handshake message is split over two or more
                // records, there MUST NOT be any other records between them."
                // https://www.rfc-editor.org/rfc/rfc8446#section-5.1
                return Err(PeerMisbehaved::MessageInterleavedWithHandshakeMessage.into());
            }

            match message.payload.len() {
                0 => {
                    if self.seen_consecutive_empty_fragments
                        == ALLOWED_CONSECUTIVE_EMPTY_FRAGMENTS_MAX
                    {
                        return Err(PeerMisbehaved::TooManyEmptyFragments.into());
                    }
                    self.seen_consecutive_empty_fragments += 1;
                }
                _ => {
                    self.seen_consecutive_empty_fragments = 0;
                }
            };

            buffer_progress.add_processed(processed);

            // do an end-run around the borrow checker, converting `message` (containing
            // a borrowed slice) to an unborrowed one (containing a `Range` into the
            // same buffer). the reborrow happens inside the branch that returns the
            // message.
            //
            // is fixed by -Zpolonius
            // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md#problem-case-3-conditional-control-flow-across-functions
            let unborrowed = InboundUnborrowedMessage::unborrow(&locator, message);

            if unborrowed.typ != ContentType::Handshake {
                let message = unborrowed.reborrow(&Delocator::new(buffer));
                buffer_progress.add_discard(processed);
                return Ok(Some(message));
            }

            let message = unborrowed.reborrow(&Delocator::new(buffer));
            self.hs_deframer
                .input_message(message, &locator, buffer_progress.processed());
            self.hs_deframer.coalesce(buffer)?;

            self.common_state.aligned_handshake = self.hs_deframer.aligned();

            if self.hs_deframer.has_message_ready() {
                // trial decryption finishes with the first handshake message after it started.
                self.common_state
                    .record_layer
                    .finish_trial_decryption();

                return Ok(self.take_handshake_message(buffer, buffer_progress));
            }
        }
    }

    pub(crate) fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        Ok(self
            .dangerous_into_kernel_connection()?
            .0)
    }

    pub(crate) fn dangerous_into_kernel_connection(
        self,
    ) -> Result<(ExtractedSecrets, KernelConnection<Side>), Error> {
        if !self
            .common_state
            .enable_secret_extraction
        {
            return Err(ApiMisuse::SecretExtractionRequiresPriorOptIn.into());
        }

        if self.common_state.is_handshaking() {
            return Err(Error::HandshakeNotComplete);
        }

        if !self
            .common_state
            .sendable_tls
            .is_empty()
        {
            return Err(ApiMisuse::SecretExtractionWithPendingSendableData.into());
        }

        let state = self.state?;

        let record_layer = &self.common_state.record_layer;

        let (secrets, state) = state.into_external_state()?;
        let secrets = ExtractedSecrets {
            tx: (record_layer.write_seq(), secrets.tx),
            rx: (record_layer.read_seq(), secrets.rx),
        };
        let external = KernelConnection::new(state, self.common_state)?;

        Ok((secrets, external))
    }

    pub(crate) fn exporter(&mut self) -> Result<KeyingMaterialExporter, Error> {
        match self.common_state.exporter.take() {
            Some(inner) => Ok(KeyingMaterialExporter { inner }),
            None if self.common_state.is_handshaking() => Err(Error::HandshakeNotComplete),
            None => Err(ApiMisuse::ExporterAlreadyUsed.into()),
        }
    }

    #[cfg(feature = "std")]
    pub(crate) fn early_exporter(&mut self) -> Result<KeyingMaterialExporter, Error> {
        match self.common_state.early_exporter.take() {
            Some(inner) => Ok(KeyingMaterialExporter { inner }),
            None => Err(ApiMisuse::ExporterAlreadyUsed.into()),
        }
    }

    /// Trigger a `refresh_traffic_keys` if required by `CommonState`.
    fn maybe_refresh_traffic_keys(&mut self) {
        if mem::take(
            &mut self
                .common_state
                .refresh_traffic_keys_pending,
        ) {
            let _ = self.refresh_traffic_keys();
        }
    }

    fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
        match &mut self.state {
            Ok(st) => st.send_key_update_request(&mut self.common_state),
            Err(e) => Err(e.clone()),
        }
    }
}

/// Data specific to the peer's side (client or server).
pub trait SideData: Debug {}

/// An [`EncodedMessage<Payload<'_>>`] which does not borrow its payload, but
/// references a range that can later be borrowed.
struct InboundUnborrowedMessage {
    typ: ContentType,
    version: ProtocolVersion,
    bounds: Range<usize>,
}

impl InboundUnborrowedMessage {
    fn unborrow(locator: &Locator, msg: EncodedMessage<&'_ [u8]>) -> Self {
        Self {
            typ: msg.typ,
            version: msg.version,
            bounds: locator.locate(msg.payload),
        }
    }

    fn reborrow<'b>(self, delocator: &Delocator<'b>) -> EncodedMessage<&'b [u8]> {
        EncodedMessage {
            typ: self.typ,
            version: self.version,
            payload: delocator.slice_from_range(&self.bounds),
        }
    }
}

/// cf. BoringSSL's `kMaxEmptyRecords`
/// <https://github.com/google/boringssl/blob/dec5989b793c56ad4dd32173bd2d8595ca78b398/ssl/tls_record.cc#L124-L128>
const ALLOWED_CONSECUTIVE_EMPTY_FRAGMENTS_MAX: u8 = 32;