rustls/conn.rs
1use alloc::boxed::Box;
2use core::fmt::Debug;
3use core::mem;
4use core::ops::{Deref, DerefMut, Range};
5#[cfg(feature = "std")]
6use std::io;
7
8use kernel::KernelConnection;
9
10use crate::common_state::{CommonState, Context, DEFAULT_BUFFER_LIMIT, IoState, State};
11use crate::enums::{AlertDescription, ContentType, ProtocolVersion};
12use crate::error::{Error, PeerMisbehaved};
13use crate::log::trace;
14use crate::msgs::deframer::DeframerIter;
15use crate::msgs::deframer::buffers::{BufferProgress, DeframerVecBuffer, Delocator, Locator};
16use crate::msgs::deframer::handshake::HandshakeDeframer;
17use crate::msgs::handshake::Random;
18use crate::msgs::message::{InboundPlainMessage, Message, MessagePayload};
19use crate::record_layer::Decrypted;
20use crate::suites::ExtractedSecrets;
21use crate::vecbuf::ChunkVecBuffer;
22
23// pub so that it can be re-exported from the crate root
24pub mod kernel;
25pub(crate) mod unbuffered;
26
27#[cfg(feature = "std")]
28mod connection {
29 use alloc::vec::Vec;
30 use core::fmt::Debug;
31 use core::ops::{Deref, DerefMut};
32 use std::io::{self, BufRead, Read};
33
34 use crate::ConnectionCommon;
35 use crate::common_state::{CommonState, IoState};
36 use crate::error::Error;
37 use crate::msgs::message::OutboundChunks;
38 use crate::suites::ExtractedSecrets;
39 use crate::vecbuf::ChunkVecBuffer;
40
    /// A client or server connection.
    ///
    /// Wraps either a client or server connection behind a single type so
    /// both ends of a TLS session can be driven through one API surface.
    #[allow(clippy::exhaustive_enums)]
    #[derive(Debug)]
    pub enum Connection {
        /// A client connection
        Client(crate::client::ClientConnection),
        /// A server connection
        Server(crate::server::ServerConnection),
    }
50
51 impl Connection {
52 /// Read TLS content from `rd`.
53 ///
54 /// See [`ConnectionCommon::read_tls()`] for more information.
55 pub fn read_tls(&mut self, rd: &mut dyn Read) -> Result<usize, io::Error> {
56 match self {
57 Self::Client(conn) => conn.read_tls(rd),
58 Self::Server(conn) => conn.read_tls(rd),
59 }
60 }
61
62 /// Writes TLS messages to `wr`.
63 ///
64 /// See [`ConnectionCommon::write_tls()`] for more information.
65 pub fn write_tls(&mut self, wr: &mut dyn io::Write) -> Result<usize, io::Error> {
66 self.sendable_tls.write_to(wr)
67 }
68
69 /// Returns an object that allows reading plaintext.
70 pub fn reader(&mut self) -> Reader<'_> {
71 match self {
72 Self::Client(conn) => conn.reader(),
73 Self::Server(conn) => conn.reader(),
74 }
75 }
76
77 /// Returns an object that allows writing plaintext.
78 pub fn writer(&mut self) -> Writer<'_> {
79 match self {
80 Self::Client(conn) => Writer::new(&mut **conn),
81 Self::Server(conn) => Writer::new(&mut **conn),
82 }
83 }
84
85 /// Processes any new packets read by a previous call to [`Connection::read_tls`].
86 ///
87 /// See [`ConnectionCommon::process_new_packets()`] for more information.
88 pub fn process_new_packets(&mut self) -> Result<IoState, Error> {
89 match self {
90 Self::Client(conn) => conn.process_new_packets(),
91 Self::Server(conn) => conn.process_new_packets(),
92 }
93 }
94
95 /// Derives key material from the agreed connection secrets.
96 ///
97 /// See [`ConnectionCommon::export_keying_material()`] for more information.
98 pub fn export_keying_material<T: AsMut<[u8]>>(
99 &self,
100 output: T,
101 label: &[u8],
102 context: Option<&[u8]>,
103 ) -> Result<T, Error> {
104 match self {
105 Self::Client(conn) => conn.export_keying_material(output, label, context),
106 Self::Server(conn) => conn.export_keying_material(output, label, context),
107 }
108 }
109
110 /// This function uses `io` to complete any outstanding IO for this connection.
111 ///
112 /// See [`ConnectionCommon::complete_io()`] for more information.
113 pub fn complete_io(
114 &mut self,
115 io: &mut (impl Read + io::Write),
116 ) -> Result<(usize, usize), io::Error> {
117 match self {
118 Self::Client(conn) => conn.complete_io(io),
119 Self::Server(conn) => conn.complete_io(io),
120 }
121 }
122
123 /// Extract secrets, so they can be used when configuring kTLS, for example.
124 /// Should be used with care as it exposes secret key material.
125 pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
126 match self {
127 Self::Client(client) => client.dangerous_extract_secrets(),
128 Self::Server(server) => server.dangerous_extract_secrets(),
129 }
130 }
131
132 /// Sets a limit on the internal buffers
133 ///
134 /// See [`ConnectionCommon::set_buffer_limit()`] for more information.
135 pub fn set_buffer_limit(&mut self, limit: Option<usize>) {
136 match self {
137 Self::Client(client) => client.set_buffer_limit(limit),
138 Self::Server(server) => server.set_buffer_limit(limit),
139 }
140 }
141
142 /// Sends a TLS1.3 `key_update` message to refresh a connection's keys
143 ///
144 /// See [`ConnectionCommon::refresh_traffic_keys()`] for more information.
145 pub fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
146 match self {
147 Self::Client(client) => client.refresh_traffic_keys(),
148 Self::Server(server) => server.refresh_traffic_keys(),
149 }
150 }
151 }
152
153 impl Deref for Connection {
154 type Target = CommonState;
155
156 fn deref(&self) -> &Self::Target {
157 match self {
158 Self::Client(conn) => &conn.core.common_state,
159 Self::Server(conn) => &conn.core.common_state,
160 }
161 }
162 }
163
164 impl DerefMut for Connection {
165 fn deref_mut(&mut self) -> &mut Self::Target {
166 match self {
167 Self::Client(conn) => &mut conn.core.common_state,
168 Self::Server(conn) => &mut conn.core.common_state,
169 }
170 }
171 }
172
    /// A structure that implements [`std::io::Read`] for reading plaintext.
    pub struct Reader<'a> {
        // Decrypted application data waiting to be handed to the caller.
        pub(super) received_plaintext: &'a mut ChunkVecBuffer,
        // Whether the peer sent a TLS `close_notify` alert (clean closure).
        pub(super) has_received_close_notify: bool,
        // Whether the underlying transport reported EOF.
        pub(super) has_seen_eof: bool,
    }
179
180 impl<'a> Reader<'a> {
181 /// Check the connection's state if no bytes are available for reading.
182 fn check_no_bytes_state(&self) -> io::Result<()> {
183 match (self.has_received_close_notify, self.has_seen_eof) {
184 // cleanly closed; don't care about TCP EOF: express this as Ok(0)
185 (true, _) => Ok(()),
186 // unclean closure
187 (false, true) => Err(io::Error::new(
188 io::ErrorKind::UnexpectedEof,
189 UNEXPECTED_EOF_MESSAGE,
190 )),
191 // connection still going, but needs more data: signal `WouldBlock` so that
192 // the caller knows this
193 (false, false) => Err(io::ErrorKind::WouldBlock.into()),
194 }
195 }
196
197 /// Obtain a chunk of plaintext data received from the peer over this TLS connection.
198 ///
199 /// This method consumes `self` so that it can return a slice whose lifetime is bounded by
200 /// the [`ConnectionCommon`] that created this `Reader`.
201 pub fn into_first_chunk(self) -> io::Result<&'a [u8]> {
202 match self.received_plaintext.chunk() {
203 Some(chunk) => Ok(chunk),
204 None => {
205 self.check_no_bytes_state()?;
206 Ok(&[])
207 }
208 }
209 }
210 }
211
212 impl Read for Reader<'_> {
213 /// Obtain plaintext data received from the peer over this TLS connection.
214 ///
215 /// If the peer closes the TLS session cleanly, this returns `Ok(0)` once all
216 /// the pending data has been read. No further data can be received on that
217 /// connection, so the underlying TCP connection should be half-closed too.
218 ///
219 /// If the peer closes the TLS session uncleanly (a TCP EOF without sending a
220 /// `close_notify` alert) this function returns a `std::io::Error` of type
221 /// `ErrorKind::UnexpectedEof` once any pending data has been read.
222 ///
223 /// Note that support for `close_notify` varies in peer TLS libraries: many do not
224 /// support it and uncleanly close the TCP connection (this might be
225 /// vulnerable to truncation attacks depending on the application protocol).
226 /// This means applications using rustls must both handle EOF
227 /// from this function, *and* unexpected EOF of the underlying TCP connection.
228 ///
229 /// If there are no bytes to read, this returns `Err(ErrorKind::WouldBlock.into())`.
230 ///
231 /// You may learn the number of bytes available at any time by inspecting
232 /// the return of [`Connection::process_new_packets`].
233 fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
234 let len = self.received_plaintext.read(buf)?;
235 if len > 0 || buf.is_empty() {
236 return Ok(len);
237 }
238
239 self.check_no_bytes_state()
240 .map(|()| len)
241 }
242 }
243
244 impl BufRead for Reader<'_> {
245 /// Obtain a chunk of plaintext data received from the peer over this TLS connection.
246 /// This reads the same data as [`Reader::read()`], but returns a reference instead of
247 /// copying the data.
248 ///
249 /// The caller should call [`Reader::consume()`] afterward to advance the buffer.
250 ///
251 /// See [`Reader::into_first_chunk()`] for a version of this function that returns a
252 /// buffer with a longer lifetime.
253 fn fill_buf(&mut self) -> io::Result<&[u8]> {
254 Reader {
255 // reborrow
256 received_plaintext: self.received_plaintext,
257 ..*self
258 }
259 .into_first_chunk()
260 }
261
262 fn consume(&mut self, amt: usize) {
263 self.received_plaintext
264 .consume_first_chunk(amt)
265 }
266 }
267
    // Guidance reported when the peer drops the TCP stream without sending a
    // close_notify alert. The trailing `\` continues the string literal onto the
    // next line (the line break and leading whitespace are not part of the text).
    const UNEXPECTED_EOF_MESSAGE: &str = "peer closed connection without sending TLS close_notify: \
https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof";
270
    /// A structure that implements [`std::io::Write`] for writing plaintext.
    pub struct Writer<'a> {
        // Destination for plaintext; the connection buffers and encrypts it.
        sink: &'a mut dyn PlaintextSink,
    }
275
276 impl<'a> Writer<'a> {
277 /// Create a new Writer.
278 ///
279 /// This is not an external interface. Get one of these objects
280 /// from [`Connection::writer`].
281 pub(crate) fn new(sink: &'a mut dyn PlaintextSink) -> Self {
282 Writer { sink }
283 }
284 }
285
    impl io::Write for Writer<'_> {
        /// Send the plaintext `buf` to the peer, encrypting
        /// and authenticating it. Once this function succeeds
        /// you should call [`Connection::write_tls`] which will output the
        /// corresponding TLS records.
        ///
        /// This function buffers plaintext sent before the
        /// TLS handshake completes, and sends it as soon
        /// as it can. See [`ConnectionCommon::set_buffer_limit`] to control
        /// the size of this buffer.
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            self.sink.write(buf)
        }

        /// Vectored form of `write`; delegates to the underlying sink.
        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
            self.sink.write_vectored(bufs)
        }

        /// Flush the underlying sink.
        fn flush(&mut self) -> io::Result<()> {
            self.sink.flush()
        }
    }
308
    /// Internal trait implemented by the [`ServerConnection`]/[`ClientConnection`]
    /// allowing them to be the subject of a [`Writer`].
    ///
    /// [`ServerConnection`]: crate::ServerConnection
    /// [`ClientConnection`]: crate::ClientConnection
    pub(crate) trait PlaintextSink {
        /// Accept `buf` as plaintext for the peer, returning how many bytes were taken.
        fn write(&mut self, buf: &[u8]) -> io::Result<usize>;
        /// Vectored variant of `write`, accepting several buffers at once.
        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize>;
        /// Flush any implementation-internal buffering.
        fn flush(&mut self) -> io::Result<()>;
    }
319
320 impl<T> PlaintextSink for ConnectionCommon<T> {
321 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
322 let len = self
323 .core
324 .common_state
325 .buffer_plaintext(buf.into(), &mut self.sendable_plaintext);
326 self.core.maybe_refresh_traffic_keys();
327 Ok(len)
328 }
329
330 fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
331 let payload_owner: Vec<&[u8]>;
332 let payload = match bufs.len() {
333 0 => return Ok(0),
334 1 => OutboundChunks::Single(bufs[0].deref()),
335 _ => {
336 payload_owner = bufs
337 .iter()
338 .map(|io_slice| io_slice.deref())
339 .collect();
340
341 OutboundChunks::new(&payload_owner)
342 }
343 };
344 let len = self
345 .core
346 .common_state
347 .buffer_plaintext(payload, &mut self.sendable_plaintext);
348 self.core.maybe_refresh_traffic_keys();
349 Ok(len)
350 }
351
352 fn flush(&mut self) -> io::Result<()> {
353 Ok(())
354 }
355 }
356}
357
358#[cfg(feature = "std")]
359pub use connection::{Connection, Reader, Writer};
360
/// The 32-byte random values exchanged in the hello messages.
#[derive(Debug)]
pub(crate) struct ConnectionRandoms {
    // Randomness contributed by the client.
    pub(crate) client: [u8; 32],
    // Randomness contributed by the server.
    pub(crate) server: [u8; 32],
}
366
367impl ConnectionRandoms {
368 pub(crate) fn new(client: Random, server: Random) -> Self {
369 Self {
370 client: client.0,
371 server: server.0,
372 }
373 }
374}
375
/// Interface shared by client and server connections.
pub struct ConnectionCommon<Data> {
    pub(crate) core: ConnectionCore<Data>,
    // Raw TLS bytes accepted by `read_tls`, awaiting deframing/decryption.
    deframer_buffer: DeframerVecBuffer,
    // Plaintext queued via `Writer` (e.g. before the handshake completes),
    // drained by `buffer_plaintext` calls in the `PlaintextSink` impl.
    sendable_plaintext: ChunkVecBuffer,
}
382
impl<Data> ConnectionCommon<Data> {
    /// Processes any new packets read by a previous call to
    /// [`Connection::read_tls`].
    ///
    /// Errors from this function relate to TLS protocol errors, and
    /// are fatal to the connection. Future calls after an error will do
    /// no new work and will return the same error. After an error is
    /// received from [`process_new_packets`], you should not call [`read_tls`]
    /// any more (it will fill up buffers to no purpose). However, you
    /// may call the other methods on the connection, including `write`,
    /// `send_close_notify`, and `write_tls`. Most likely you will want to
    /// call `write_tls` to send any alerts queued by the error and then
    /// close the underlying connection.
    ///
    /// Success from this function comes with some sundry state data
    /// about the connection.
    ///
    /// [`read_tls`]: Connection::read_tls
    /// [`process_new_packets`]: Connection::process_new_packets
    #[inline]
    pub fn process_new_packets(&mut self) -> Result<IoState, Error> {
        // Delegate to the core, lending it our deframer and plaintext buffers.
        self.core
            .process_new_packets(&mut self.deframer_buffer, &mut self.sendable_plaintext)
    }

    /// Derives key material from the agreed connection secrets.
    ///
    /// This function fills in `output` with `output.len()` bytes of key
    /// material derived from the master session secret using `label`
    /// and `context` for diversification. Ownership of the buffer is taken
    /// by the function and returned via the Ok result to ensure no key
    /// material leaks if the function fails.
    ///
    /// See RFC5705 for more details on what this does and is for.
    ///
    /// For TLS1.3 connections, this function does not use the
    /// "early" exporter at any point.
    ///
    /// This function fails if called prior to the handshake completing;
    /// check with [`CommonState::is_handshaking`] first.
    ///
    /// This function fails if `output.len()` is zero.
    #[inline]
    pub fn export_keying_material<T: AsMut<[u8]>>(
        &self,
        output: T,
        label: &[u8],
        context: Option<&[u8]>,
    ) -> Result<T, Error> {
        self.core
            .export_keying_material(output, label, context)
    }

    /// Extract secrets, so they can be used when configuring kTLS, for example.
    /// Should be used with care as it exposes secret key material.
    pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        self.core.dangerous_extract_secrets()
    }

    /// Sets a limit on the internal buffers used to buffer
    /// unsent plaintext (prior to completing the TLS handshake)
    /// and unsent TLS records. This limit acts only on application
    /// data written through [`Connection::writer`].
    ///
    /// By default the limit is 64KB. The limit can be set
    /// at any time, even if the current buffer use is higher.
    ///
    /// [`None`] means no limit applies, and will mean that written
    /// data is buffered without bound -- it is up to the application
    /// to appropriately schedule its plaintext and TLS writes to bound
    /// memory usage.
    ///
    /// For illustration: `Some(1)` means a limit of one byte applies:
    /// [`Connection::writer`] will accept only one byte, encrypt it and
    /// add a TLS header. Once this is sent via [`Connection::write_tls`],
    /// another byte may be sent.
    ///
    /// # Internal write-direction buffering
    /// rustls has two buffers whose size are bounded by this setting:
    ///
    /// ## Buffering of unsent plaintext data prior to handshake completion
    ///
    /// Calls to [`Connection::writer`] before or during the handshake
    /// are buffered (up to the limit specified here). Once the
    /// handshake completes this data is encrypted and the resulting
    /// TLS records are added to the outgoing buffer.
    ///
    /// ## Buffering of outgoing TLS records
    ///
    /// This buffer is used to store TLS records that rustls needs to
    /// send to the peer. It is used in these two circumstances:
    ///
    /// - by [`Connection::process_new_packets`] when a handshake or alert
    ///   TLS record needs to be sent.
    /// - by [`Connection::writer`] post-handshake: the plaintext is
    ///   encrypted and the resulting TLS record is buffered.
    ///
    /// This buffer is emptied by [`Connection::write_tls`].
    ///
    /// [`Connection::writer`]: crate::Connection::writer
    /// [`Connection::write_tls`]: crate::Connection::write_tls
    /// [`Connection::process_new_packets`]: crate::Connection::process_new_packets
    pub fn set_buffer_limit(&mut self, limit: Option<usize>) {
        // `sendable_plaintext` is our own field; `sendable_tls` lives on
        // `CommonState` and is reached via this type's `DerefMut` impl.
        self.sendable_plaintext.set_limit(limit);
        self.sendable_tls.set_limit(limit);
    }

    /// Sends a TLS1.3 `key_update` message to refresh a connection's keys.
    ///
    /// This call refreshes our encryption keys. Once the peer receives the message,
    /// it refreshes _its_ encryption and decryption keys and sends a response.
    /// Once we receive that response, we refresh our decryption keys to match.
    /// At the end of this process, keys in both directions have been refreshed.
    ///
    /// Note that this process does not happen synchronously: this call just
    /// arranges that the `key_update` message will be included in the next
    /// `write_tls` output.
    ///
    /// This fails with `Error::HandshakeNotComplete` if called before the initial
    /// handshake is complete, or if a version prior to TLS1.3 is negotiated.
    ///
    /// # Usage advice
    /// Note that other implementations (including rustls) may enforce limits on
    /// the number of `key_update` messages allowed on a given connection to prevent
    /// denial of service. Therefore, this should be called sparingly.
    ///
    /// rustls implicitly and automatically refreshes traffic keys when needed
    /// according to the selected cipher suite's cryptographic constraints. There
    /// is therefore no need to call this manually to avoid cryptographic keys
    /// "wearing out".
    ///
    /// The main reason to call this manually is to roll keys when it is known
    /// a connection will be idle for a long period.
    pub fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
        self.core.refresh_traffic_keys()
    }
}
520
#[cfg(feature = "std")]
impl<Data> ConnectionCommon<Data> {
    /// Returns an object that allows reading plaintext.
    pub fn reader(&mut self) -> Reader<'_> {
        let common = &mut self.core.common_state;
        Reader {
            received_plaintext: &mut common.received_plaintext,
            // Are we done? i.e., have we processed all received messages, and received a
            // close_notify to indicate that no new messages will arrive?
            has_received_close_notify: common.has_received_close_notify,
            has_seen_eof: common.has_seen_eof,
        }
    }

    /// Returns an object that allows writing plaintext.
    pub fn writer(&mut self) -> Writer<'_> {
        // `self` is the `PlaintextSink`: see the impl further up this file.
        Writer::new(self)
    }

    /// This function uses `io` to complete any outstanding IO for
    /// this connection.
    ///
    /// This is a convenience function which solely uses other parts
    /// of the public API.
    ///
    /// What this means depends on the connection state:
    ///
    /// - If the connection [`is_handshaking`], then IO is performed until
    ///   the handshake is complete.
    /// - Otherwise, if [`wants_write`] is true, [`write_tls`] is invoked
    ///   until it is all written.
    /// - Otherwise, if [`wants_read`] is true, [`read_tls`] is invoked
    ///   once.
    ///
    /// The return value is the number of bytes read from and written
    /// to `io`, respectively. Once both `read()` and `write()` yield `WouldBlock`,
    /// this function will propagate the error.
    ///
    /// Errors from TLS record handling (i.e., from [`process_new_packets`])
    /// are wrapped in an `io::ErrorKind::InvalidData`-kind error.
    ///
    /// [`is_handshaking`]: CommonState::is_handshaking
    /// [`wants_read`]: CommonState::wants_read
    /// [`wants_write`]: CommonState::wants_write
    /// [`write_tls`]: ConnectionCommon::write_tls
    /// [`read_tls`]: ConnectionCommon::read_tls
    /// [`process_new_packets`]: ConnectionCommon::process_new_packets
    pub fn complete_io(
        &mut self,
        io: &mut (impl io::Read + io::Write),
    ) -> Result<(usize, usize), io::Error> {
        let mut eof = false;
        let mut wrlen = 0;
        let mut rdlen = 0;
        loop {
            let (mut blocked_write, mut blocked_read) = (None, None);
            let until_handshaked = self.is_handshaking();

            if !self.wants_write() && !self.wants_read() {
                // We will make no further progress.
                return Ok((rdlen, wrlen));
            }

            // Write phase: drain all queued TLS records to `io`.
            while self.wants_write() {
                match self.write_tls(io) {
                    Ok(0) => {
                        io.flush()?;
                        return Ok((rdlen, wrlen)); // EOF.
                    }
                    Ok(n) => wrlen += n,
                    Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                        blocked_write = Some(err);
                        break;
                    }
                    Err(err) => return Err(err),
                }
            }
            if wrlen > 0 {
                io.flush()?;
            }

            if !until_handshaked && wrlen > 0 {
                return Ok((rdlen, wrlen));
            }

            // If we want to write, but are WouldBlocked by the underlying IO, *and*
            // have no desire to read; that is everything.
            if let (Some(_), false) = (&blocked_write, self.wants_read()) {
                return match wrlen {
                    0 => Err(blocked_write.unwrap()),
                    _ => Ok((rdlen, wrlen)),
                };
            }

            // Read phase: perform at most one successful read of TLS data,
            // retrying only on `Interrupted`.
            while !eof && self.wants_read() {
                let read_size = match self.read_tls(io) {
                    Ok(0) => {
                        eof = true;
                        Some(0)
                    }
                    Ok(n) => {
                        rdlen += n;
                        Some(n)
                    }
                    Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                        blocked_read = Some(err);
                        break;
                    }
                    Err(err) if err.kind() == io::ErrorKind::Interrupted => None, // nothing to do
                    Err(err) => return Err(err),
                };
                if read_size.is_some() {
                    break;
                }
            }

            if let Err(e) = self.process_new_packets() {
                // In case we have an alert to send describing this error, try a last-gasp
                // write -- but don't predate the primary error.
                let _ignored = self.write_tls(io);
                let _ignored = io.flush();
                return Err(io::Error::new(io::ErrorKind::InvalidData, e));
            };

            // If we want to read, but are WouldBlocked by the underlying IO, *and*
            // have no desire to write; that is everything.
            if let (Some(_), false) = (&blocked_read, self.wants_write()) {
                return match rdlen {
                    0 => Err(blocked_read.unwrap()),
                    _ => Ok((rdlen, wrlen)),
                };
            }

            // if we're doing IO until handshaked, and we believe we've finished handshaking,
            // but process_new_packets() has queued TLS data to send, loop around again to write
            // the queued messages.
            if until_handshaked && !self.is_handshaking() && self.wants_write() {
                continue;
            }

            // Termination analysis: `blocked` is Some only when *both*
            // directions reported `WouldBlock` this iteration.
            let blocked = blocked_write.zip(blocked_read);
            match (eof, until_handshaked, self.is_handshaking(), blocked) {
                (_, true, false, _) => return Ok((rdlen, wrlen)),
                (_, _, _, Some((e, _))) if rdlen == 0 && wrlen == 0 => return Err(e),
                (_, false, _, _) => return Ok((rdlen, wrlen)),
                (true, true, true, _) => return Err(io::Error::from(io::ErrorKind::UnexpectedEof)),
                _ => {}
            }
        }
    }

    /// Extract the first handshake message.
    ///
    /// This is a shortcut to the `process_new_packets()` -> `process_msg()` ->
    /// `process_handshake_messages()` path, specialized for the first handshake message.
    pub(crate) fn first_handshake_message(&mut self) -> Result<Option<Message<'static>>, Error> {
        let mut buffer_progress = self.core.hs_deframer.progress();

        // Deframe one message (no state yet), then convert it to an owned
        // `Message` so it can outlive the deframer buffer.
        let res = self
            .core
            .deframe(
                None,
                self.deframer_buffer.filled_mut(),
                &mut buffer_progress,
            )
            .map(|opt| opt.map(|pm| Message::try_from(pm).map(|m| m.into_owned())));

        match res? {
            Some(Ok(msg)) => {
                self.deframer_buffer
                    .discard(buffer_progress.take_discard());
                Ok(Some(msg))
            }
            Some(Err(err)) => Err(self.send_fatal_alert(AlertDescription::DecodeError, err)),
            None => Ok(None),
        }
    }

    // Install a new handshake/traffic state machine (clears any prior error).
    pub(crate) fn replace_state(&mut self, new: Box<dyn State<Data>>) {
        self.core.state = Ok(new);
    }

    /// Read TLS content from `rd` into the internal buffer.
    ///
    /// Due to the internal buffering, `rd` can supply TLS messages in arbitrary-sized chunks (like
    /// a socket or pipe might).
    ///
    /// You should call [`process_new_packets()`] each time a call to this function succeeds in order
    /// to empty the incoming TLS data buffer.
    ///
    /// This function returns `Ok(0)` when the underlying `rd` does so. This typically happens when
    /// a socket is cleanly closed, or a file is at EOF. Errors may result from the IO done through
    /// `rd`; additionally, errors of `ErrorKind::Other` are emitted to signal backpressure:
    ///
    /// * In order to empty the incoming TLS data buffer, you should call [`process_new_packets()`]
    ///   each time a call to this function succeeds.
    /// * In order to empty the incoming plaintext data buffer, you should empty it through
    ///   the [`reader()`] after the call to [`process_new_packets()`].
    ///
    /// This function also returns `Ok(0)` once a `close_notify` alert has been successfully
    /// received.  No additional data is ever read in this state.
    ///
    /// [`process_new_packets()`]: ConnectionCommon::process_new_packets
    /// [`reader()`]: ConnectionCommon::reader
    pub fn read_tls(&mut self, rd: &mut dyn io::Read) -> Result<usize, io::Error> {
        // Backpressure: refuse more input until the caller drains the plaintext.
        if self.received_plaintext.is_full() {
            return Err(io::Error::other("received plaintext buffer full"));
        }

        if self.has_received_close_notify {
            return Ok(0);
        }

        let res = self
            .deframer_buffer
            .read(rd, self.core.hs_deframer.is_active());
        if let Ok(0) = res {
            // Remember transport EOF so `Reader` can distinguish clean/unclean closure.
            self.has_seen_eof = true;
        }
        res
    }

    /// Writes TLS messages to `wr`.
    ///
    /// On success, this function returns `Ok(n)` where `n` is a number of bytes written to `wr`
    /// (after encoding and encryption).
    ///
    /// After this function returns, the connection buffer may not yet be fully flushed. The
    /// [`CommonState::wants_write`] function can be used to check if the output buffer is empty.
    pub fn write_tls(&mut self, wr: &mut dyn io::Write) -> Result<usize, io::Error> {
        self.sendable_tls.write_to(wr)
    }
}
754
755impl<'a, Data> From<&'a mut ConnectionCommon<Data>> for Context<'a, Data> {
756 fn from(conn: &'a mut ConnectionCommon<Data>) -> Self {
757 Self {
758 common: &mut conn.core.common_state,
759 data: &mut conn.core.data,
760 sendable_plaintext: Some(&mut conn.sendable_plaintext),
761 }
762 }
763}
764
// Expose the shared `CommonState` so its methods can be called directly on a
// `ConnectionCommon`.
impl<T> Deref for ConnectionCommon<T> {
    type Target = CommonState;

    fn deref(&self) -> &Self::Target {
        &self.core.common_state
    }
}
772
// Mutable counterpart of the `Deref` impl above.
impl<T> DerefMut for ConnectionCommon<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.core.common_state
    }
}
778
779impl<Data> From<ConnectionCore<Data>> for ConnectionCommon<Data> {
780 fn from(core: ConnectionCore<Data>) -> Self {
781 Self {
782 core,
783 deframer_buffer: DeframerVecBuffer::default(),
784 sendable_plaintext: ChunkVecBuffer::new(Some(DEFAULT_BUFFER_LIMIT)),
785 }
786 }
787}
788
/// Interface shared by unbuffered client and server connections.
pub struct UnbufferedConnectionCommon<Data> {
    pub(crate) core: ConnectionCore<Data>,
    // NOTE(review): presumably set when TLS output is queued for the caller to
    // send; managed by the unbuffered driver (not visible here) -- confirm.
    wants_write: bool,
    // NOTE(review): presumably records that the peer-closed state was already
    // surfaced to the caller, to avoid emitting it twice -- confirm.
    emitted_peer_closed_state: bool,
}
795
796impl<Data> From<ConnectionCore<Data>> for UnbufferedConnectionCommon<Data> {
797 fn from(core: ConnectionCore<Data>) -> Self {
798 Self {
799 core,
800 wants_write: false,
801 emitted_peer_closed_state: false,
802 }
803 }
804}
805
impl<Data> UnbufferedConnectionCommon<Data> {
    /// Extract secrets, so they can be used when configuring kTLS, for example.
    /// Should be used with care as it exposes secret key material.
    pub fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
        // Consumes the connection: no further TLS processing is possible afterwards.
        self.core.dangerous_extract_secrets()
    }
}
813
// Read-only access to the shared `CommonState`, mirroring `ConnectionCommon`.
impl<T> Deref for UnbufferedConnectionCommon<T> {
    type Target = CommonState;

    fn deref(&self) -> &Self::Target {
        &self.core.common_state
    }
}
821
/// Core connection machinery shared by the buffered and unbuffered wrappers.
pub(crate) struct ConnectionCore<Data> {
    // Current handshake/traffic state machine, or the fatal error that ended it
    // (set by `process_new_packets` so later calls re-report the same error).
    pub(crate) state: Result<Box<dyn State<Data>>, Error>,
    // Role-specific data (the `Data` type parameter, client- or server-side).
    pub(crate) data: Data,
    pub(crate) common_state: CommonState,
    pub(crate) hs_deframer: HandshakeDeframer,

    /// We limit consecutive empty fragments to avoid a route for the peer to send
    /// us significant but fruitless traffic.
    seen_consecutive_empty_fragments: u8,
}
832
833impl<Data> ConnectionCore<Data> {
834 pub(crate) fn new(state: Box<dyn State<Data>>, data: Data, common_state: CommonState) -> Self {
835 Self {
836 state: Ok(state),
837 data,
838 common_state,
839 hs_deframer: HandshakeDeframer::default(),
840 seen_consecutive_empty_fragments: 0,
841 }
842 }
843
    /// Deframe, decrypt and process every complete message currently buffered,
    /// updating `self.state` and returning the connection's IO state on success.
    pub(crate) fn process_new_packets(
        &mut self,
        deframer_buffer: &mut DeframerVecBuffer,
        sendable_plaintext: &mut ChunkVecBuffer,
    ) -> Result<IoState, Error> {
        // Take the state out, leaving a placeholder; if a previous call failed,
        // restore and re-report that same error without doing new work.
        let mut state = match mem::replace(&mut self.state, Err(Error::HandshakeNotComplete)) {
            Ok(state) => state,
            Err(e) => {
                self.state = Err(e.clone());
                return Err(e);
            }
        };

        let mut buffer_progress = self.hs_deframer.progress();

        loop {
            let res = self.deframe(
                Some(&*state),
                deframer_buffer.filled_mut(),
                &mut buffer_progress,
            );

            let opt_msg = match res {
                Ok(opt_msg) => opt_msg,
                Err(e) => {
                    // Record the fatal error and drop whatever input was consumed.
                    self.state = Err(e.clone());
                    deframer_buffer.discard(buffer_progress.take_discard());
                    return Err(e);
                }
            };

            let Some(msg) = opt_msg else {
                break;
            };

            match self.process_msg(msg, state, Some(sendable_plaintext)) {
                Ok(new) => state = new,
                Err(e) => {
                    self.state = Err(e.clone());
                    deframer_buffer.discard(buffer_progress.take_discard());
                    return Err(e);
                }
            }

            if self
                .common_state
                .has_received_close_notify
            {
                // "Any data received after a closure alert has been received MUST be ignored."
                // -- <https://datatracker.ietf.org/doc/html/rfc8446#section-6.1>
                // This is data that has already been accepted in `read_tls`.
                buffer_progress.add_discard(deframer_buffer.filled().len());
                break;
            }

            deframer_buffer.discard(buffer_progress.take_discard());
        }

        // Flush any discard accumulated on the final (message-less) iteration.
        deframer_buffer.discard(buffer_progress.take_discard());
        self.state = Ok(state);
        Ok(self.common_state.current_io_state())
    }
906
907 /// Pull a message out of the deframer and send any messages that need to be sent as a result.
908 fn deframe<'b>(
909 &mut self,
910 state: Option<&dyn State<Data>>,
911 buffer: &'b mut [u8],
912 buffer_progress: &mut BufferProgress,
913 ) -> Result<Option<InboundPlainMessage<'b>>, Error> {
914 // before processing any more of `buffer`, return any extant messages from `hs_deframer`
915 if self.hs_deframer.has_message_ready() {
916 Ok(self.take_handshake_message(buffer, buffer_progress))
917 } else {
918 self.process_more_input(state, buffer, buffer_progress)
919 }
920 }
921
922 fn take_handshake_message<'b>(
923 &mut self,
924 buffer: &'b mut [u8],
925 buffer_progress: &mut BufferProgress,
926 ) -> Option<InboundPlainMessage<'b>> {
927 self.hs_deframer
928 .iter(buffer)
929 .next()
930 .map(|(message, discard)| {
931 buffer_progress.add_discard(discard);
932 message
933 })
934 }
935
    /// Deframe and (where necessary) decrypt the next message in `buffer`.
    ///
    /// Returns `Ok(None)` when `buffer` does not yet hold a complete record.
    /// Consumed/discardable bytes are recorded in `buffer_progress`; handshake
    /// records are routed into `hs_deframer` and only surfaced once a complete
    /// handshake message has been reassembled.
    fn process_more_input<'b>(
        &mut self,
        state: Option<&dyn State<Data>>,
        buffer: &'b mut [u8],
        buffer_progress: &mut BufferProgress,
    ) -> Result<Option<InboundPlainMessage<'b>>, Error> {
        let version_is_tls13 = matches!(
            self.common_state.negotiated_version,
            Some(ProtocolVersion::TLSv1_3)
        );

        let locator = Locator::new(buffer);

        loop {
            // restart iteration each time around: earlier iterations may have
            // advanced `buffer_progress.processed()` past consumed records.
            let mut iter = DeframerIter::new(&mut buffer[buffer_progress.processed()..]);

            let (message, processed) = loop {
                let message = match iter.next().transpose() {
                    Ok(Some(message)) => message,
                    // no complete record available yet; caller must read more input
                    Ok(None) => return Ok(None),
                    Err(err) => return Err(self.handle_deframe_error(err, state)),
                };

                let allowed_plaintext = match message.typ {
                    // CCS messages are always plaintext.
                    ContentType::ChangeCipherSpec => true,
                    // Alerts are allowed to be plaintext if-and-only-if:
                    // * The negotiated protocol version is TLS 1.3. - In TLS 1.2 it is unambiguous when
                    //   keying changes based on the CCS message. Only TLS 1.3 requires these heuristics.
                    // * We have not yet decrypted any messages from the peer - if we have we don't
                    //   expect any plaintext.
                    // * The payload size is indicative of a plaintext alert message.
                    ContentType::Alert
                        if version_is_tls13
                            && !self
                                .common_state
                                .record_layer
                                .has_decrypted()
                            && message.payload.len() <= 2 =>
                    {
                        true
                    }
                    // In other circumstances, we expect all messages to be encrypted.
                    _ => false,
                };

                // plaintext is only acceptable when no partial handshake message is pending
                if allowed_plaintext && !self.hs_deframer.is_active() {
                    break (message.into_plain_message(), iter.bytes_consumed());
                }

                let message = match self
                    .common_state
                    .record_layer
                    .decrypt_incoming(message)
                {
                    // failed decryption during trial decryption is not allowed to be
                    // interleaved with partial handshake data.
                    Ok(None) if !self.hs_deframer.is_aligned() => {
                        return Err(
                            PeerMisbehaved::RejectedEarlyDataInterleavedWithHandshakeMessage.into(),
                        );
                    }

                    // failed decryption during trial decryption.
                    Ok(None) => continue,

                    Ok(Some(message)) => message,

                    Err(err) => return Err(self.handle_deframe_error(err, state)),
                };

                let Decrypted {
                    want_close_before_decrypt,
                    plaintext,
                } = message;

                // queue close_notify before handing out this message, if the
                // record layer asked for it
                if want_close_before_decrypt {
                    self.common_state.send_close_notify();
                }

                break (plaintext, iter.bytes_consumed());
            };

            if !self.hs_deframer.is_aligned() && message.typ != ContentType::Handshake {
                // "Handshake messages MUST NOT be interleaved with other record
                // types. That is, if a handshake message is split over two or more
                // records, there MUST NOT be any other records between them."
                // https://www.rfc-editor.org/rfc/rfc8446#section-5.1
                return Err(PeerMisbehaved::MessageInterleavedWithHandshakeMessage.into());
            }

            // bound the number of consecutive empty fragments we will accept
            // (see `ALLOWED_CONSECUTIVE_EMPTY_FRAGMENTS_MAX`)
            match message.payload.len() {
                0 => {
                    if self.seen_consecutive_empty_fragments
                        == ALLOWED_CONSECUTIVE_EMPTY_FRAGMENTS_MAX
                    {
                        return Err(PeerMisbehaved::TooManyEmptyFragments.into());
                    }
                    self.seen_consecutive_empty_fragments += 1;
                }
                _ => {
                    self.seen_consecutive_empty_fragments = 0;
                }
            };

            buffer_progress.add_processed(processed);

            // do an end-run around the borrow checker, converting `message` (containing
            // a borrowed slice) to an unborrowed one (containing a `Range` into the
            // same buffer). the reborrow happens inside the branch that returns the
            // message.
            //
            // is fixed by -Zpolonius
            // https://github.com/rust-lang/rfcs/blob/master/text/2094-nll.md#problem-case-3-conditional-control-flow-across-functions
            let unborrowed = InboundUnborrowedMessage::unborrow(&locator, message);

            if unborrowed.typ != ContentType::Handshake {
                // non-handshake messages are returned directly; their bytes can
                // be discarded once the caller is done with them
                let message = unborrowed.reborrow(&Delocator::new(buffer));
                buffer_progress.add_discard(processed);
                return Ok(Some(message));
            }

            let message = unborrowed.reborrow(&Delocator::new(buffer));
            self.hs_deframer
                .input_message(message, &locator, buffer_progress.processed());
            self.hs_deframer.coalesce(buffer)?;

            self.common_state.aligned_handshake = self.hs_deframer.is_aligned();

            if self.hs_deframer.has_message_ready() {
                // trial decryption finishes with the first handshake message after it started.
                self.common_state
                    .record_layer
                    .finish_trial_decryption();

                return Ok(self.take_handshake_message(buffer, buffer_progress));
            }
        }
    }
1075
1076 fn handle_deframe_error(&mut self, error: Error, state: Option<&dyn State<Data>>) -> Error {
1077 match error {
1078 error @ Error::InvalidMessage(_) => {
1079 if self.common_state.is_quic() {
1080 self.common_state.quic.alert = Some(AlertDescription::DecodeError);
1081 error
1082 } else {
1083 self.common_state
1084 .send_fatal_alert(AlertDescription::DecodeError, error)
1085 }
1086 }
1087 Error::PeerSentOversizedRecord => self
1088 .common_state
1089 .send_fatal_alert(AlertDescription::RecordOverflow, error),
1090 Error::DecryptError => {
1091 if let Some(state) = state {
1092 state.handle_decrypt_error();
1093 }
1094 self.common_state
1095 .send_fatal_alert(AlertDescription::BadRecordMac, error)
1096 }
1097
1098 error => error,
1099 }
1100 }
1101
1102 fn process_msg(
1103 &mut self,
1104 msg: InboundPlainMessage<'_>,
1105 state: Box<dyn State<Data>>,
1106 sendable_plaintext: Option<&mut ChunkVecBuffer>,
1107 ) -> Result<Box<dyn State<Data>>, Error> {
1108 // Drop CCS messages during handshake in TLS1.3
1109 if msg.typ == ContentType::ChangeCipherSpec
1110 && !self
1111 .common_state
1112 .may_receive_application_data
1113 && self.common_state.is_tls13()
1114 {
1115 if !msg.is_valid_ccs() {
1116 // "An implementation which receives any other change_cipher_spec value or
1117 // which receives a protected change_cipher_spec record MUST abort the
1118 // handshake with an "unexpected_message" alert."
1119 return Err(self.common_state.send_fatal_alert(
1120 AlertDescription::UnexpectedMessage,
1121 PeerMisbehaved::IllegalMiddleboxChangeCipherSpec,
1122 ));
1123 }
1124
1125 self.common_state
1126 .received_tls13_change_cipher_spec()?;
1127 trace!("Dropping CCS");
1128 return Ok(state);
1129 }
1130
1131 // Now we can fully parse the message payload.
1132 let msg = match Message::try_from(msg) {
1133 Ok(msg) => msg,
1134 Err(err) => {
1135 return Err(self
1136 .common_state
1137 .send_fatal_alert(AlertDescription::from(err), err));
1138 }
1139 };
1140
1141 // For alerts, we have separate logic.
1142 if let MessagePayload::Alert(alert) = &msg.payload {
1143 self.common_state.process_alert(alert)?;
1144 return Ok(state);
1145 }
1146
1147 self.common_state
1148 .process_main_protocol(msg, state, &mut self.data, sendable_plaintext)
1149 }
1150
1151 pub(crate) fn dangerous_extract_secrets(self) -> Result<ExtractedSecrets, Error> {
1152 Ok(self
1153 .dangerous_into_kernel_connection()?
1154 .0)
1155 }
1156
1157 pub(crate) fn dangerous_into_kernel_connection(
1158 self,
1159 ) -> Result<(ExtractedSecrets, KernelConnection<Data>), Error> {
1160 if !self
1161 .common_state
1162 .enable_secret_extraction
1163 {
1164 return Err(Error::General("Secret extraction is disabled".into()));
1165 }
1166
1167 if self.common_state.is_handshaking() {
1168 return Err(Error::HandshakeNotComplete);
1169 }
1170
1171 if !self
1172 .common_state
1173 .sendable_tls
1174 .is_empty()
1175 {
1176 return Err(Error::General(
1177 "cannot convert into an KernelConnection while there are still buffered TLS records to send"
1178 .into()
1179 ));
1180 }
1181
1182 let state = self.state?;
1183
1184 let record_layer = &self.common_state.record_layer;
1185 let secrets = state.extract_secrets()?;
1186 let secrets = ExtractedSecrets {
1187 tx: (record_layer.write_seq(), secrets.tx),
1188 rx: (record_layer.read_seq(), secrets.rx),
1189 };
1190
1191 let state = state.into_external_state()?;
1192 let external = KernelConnection::new(state, self.common_state)?;
1193
1194 Ok((secrets, external))
1195 }
1196
1197 pub(crate) fn export_keying_material<T: AsMut<[u8]>>(
1198 &self,
1199 mut output: T,
1200 label: &[u8],
1201 context: Option<&[u8]>,
1202 ) -> Result<T, Error> {
1203 if output.as_mut().is_empty() {
1204 return Err(Error::General(
1205 "export_keying_material with zero-length output".into(),
1206 ));
1207 }
1208
1209 match self.state.as_ref() {
1210 Ok(st) => st
1211 .export_keying_material(output.as_mut(), label, context)
1212 .map(|_| output),
1213 Err(e) => Err(e.clone()),
1214 }
1215 }
1216
1217 /// Trigger a `refresh_traffic_keys` if required by `CommonState`.
1218 fn maybe_refresh_traffic_keys(&mut self) {
1219 if mem::take(
1220 &mut self
1221 .common_state
1222 .refresh_traffic_keys_pending,
1223 ) {
1224 let _ = self.refresh_traffic_keys();
1225 }
1226 }
1227
1228 fn refresh_traffic_keys(&mut self) -> Result<(), Error> {
1229 match &mut self.state {
1230 Ok(st) => st.send_key_update_request(&mut self.common_state),
1231 Err(e) => Err(e.clone()),
1232 }
1233 }
1234}
1235
/// Data specific to the peer's side (client or server).
///
/// This is a marker trait: it declares no methods of its own, only a
/// `Debug` requirement.
pub trait SideData: Debug {}
1238
/// An InboundPlainMessage which does not borrow its payload, but
/// references a range that can later be borrowed.
struct InboundUnborrowedMessage {
    /// Record content type of the message.
    typ: ContentType,
    /// Protocol version of the message.
    version: ProtocolVersion,
    /// Position of the payload within the deframing buffer, as computed
    /// by a `Locator` over that buffer.
    bounds: Range<usize>,
}
1246
impl InboundUnborrowedMessage {
    /// Record the payload's position via `locator`, releasing the borrow
    /// of the underlying buffer.
    fn unborrow(locator: &Locator, msg: InboundPlainMessage<'_>) -> Self {
        Self {
            typ: msg.typ,
            version: msg.version,
            bounds: locator.locate(msg.payload),
        }
    }

    /// Re-create the borrowed message by slicing `delocator`'s buffer at
    /// the stored range. This must be the same buffer that was used to
    /// `unborrow` -- the stored `bounds` are only meaningful relative to it.
    fn reborrow<'b>(self, delocator: &Delocator<'b>) -> InboundPlainMessage<'b> {
        InboundPlainMessage {
            typ: self.typ,
            version: self.version,
            payload: delocator.slice_from_range(&self.bounds),
        }
    }
}
1264
/// Maximum number of consecutive zero-length record payloads accepted before
/// the peer is rejected with `PeerMisbehaved::TooManyEmptyFragments`.
///
/// cf. BoringSSL's `kMaxEmptyRecords`
/// <https://github.com/google/boringssl/blob/dec5989b793c56ad4dd32173bd2d8595ca78b398/ssl/tls_record.cc#L124-L128>
const ALLOWED_CONSECUTIVE_EMPTY_FRAGMENTS_MAX: u8 = 32;