blake3/lib.rs

//! The official Rust implementation of the [BLAKE3] cryptographic hash
//! function.
//!
//! # Examples
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Hash an input all at once.
//! let hash1 = blake3::hash(b"foobarbaz");
//!
//! // Hash an input incrementally.
//! let mut hasher = blake3::Hasher::new();
//! hasher.update(b"foo");
//! hasher.update(b"bar");
//! hasher.update(b"baz");
//! let hash2 = hasher.finalize();
//! assert_eq!(hash1, hash2);
//!
//! // Extended output. OutputReader also implements Read and Seek.
//! # #[cfg(feature = "std")] {
//! let mut output = [0; 1000];
//! let mut output_reader = hasher.finalize_xof();
//! output_reader.fill(&mut output);
//! assert_eq!(hash1, output[..32]);
//! # }
//!
//! // Print a hash as hex.
//! println!("{}", hash1);
//! # Ok(())
//! # }
//! ```
//!
//! # Cargo Features
//!
//! The `std` feature (the only feature enabled by default) is required for
//! implementations of the [`Write`] and [`Seek`] traits, the
//! [`update_reader`](Hasher::update_reader) helper method, and runtime CPU
//! feature detection on x86. If this feature is disabled, the only way to use
//! the x86 SIMD implementations is to enable the corresponding instruction sets
//! globally, with e.g. `RUSTFLAGS="-C target-cpu=native"`. The resulting binary
//! will not be portable to other machines.
//!
//! The `rayon` feature (disabled by default, but enabled for [docs.rs]) adds
//! the [`update_rayon`](Hasher::update_rayon) and (in combination with `mmap`
//! below) [`update_mmap_rayon`](Hasher::update_mmap_rayon) methods, for
//! multithreaded hashing. However, even if this feature is enabled, all other
//! APIs remain single-threaded.
//!
//! The `mmap` feature (disabled by default, but enabled for [docs.rs]) adds the
//! [`update_mmap`](Hasher::update_mmap) and (in combination with `rayon` above)
//! [`update_mmap_rayon`](Hasher::update_mmap_rayon) helper methods for
//! memory-mapped IO.
//!
//! The `zeroize` feature (disabled by default, but enabled for [docs.rs])
//! implements
//! [`Zeroize`](https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html) for
//! this crate's types.
//!
//! The `serde` feature (disabled by default, but enabled for [docs.rs]) implements
//! [`serde::Serialize`](https://docs.rs/serde/latest/serde/trait.Serialize.html) and
//! [`serde::Deserialize`](https://docs.rs/serde/latest/serde/trait.Deserialize.html)
//! for [`Hash`](struct@Hash).
//!
//! The NEON implementation is enabled by default for AArch64 but requires the
//! `neon` feature for other ARM targets. Not all ARMv7 CPUs support NEON, and
//! enabling this feature will produce a binary that's not portable to CPUs
//! without NEON support.
//!
//! The `wasm32_simd` feature enables the WASM SIMD implementation for all `wasm32-`
//! targets. Similar to the `neon` feature, if `wasm32_simd` is enabled, WASM SIMD
//! support is assumed. This may become the default in the future.
//!
//! The `traits-preview` feature enables implementations of traits from the
//! RustCrypto [`digest`] crate, and re-exports that crate as `traits::digest`.
//! However, the traits aren't stable, and they're expected to change in
//! incompatible ways before that crate reaches 1.0. For that reason, this crate
//! makes no SemVer guarantees for this feature, and callers who use it should
//! expect breaking changes between patch versions. (The "-preview" feature name
//! follows the conventions of the RustCrypto [`signature`] crate.)
//!
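//! As an illustration, a Cargo.toml dependency enabling several of the
//! optional features above might look like this (the version requirement here
//! is just an example):
//!
//! ```toml
//! [dependencies]
//! blake3 = { version = "1", features = ["rayon", "mmap", "zeroize", "serde"] }
//! ```
//!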
//! [`Hasher::update_rayon`]: struct.Hasher.html#method.update_rayon
//! [BLAKE3]: https://blake3.io
//! [Rayon]: https://github.com/rayon-rs/rayon
//! [docs.rs]: https://docs.rs/
//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
//! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
//! [`digest`]: https://crates.io/crates/digest
//! [`signature`]: https://crates.io/crates/signature

#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(test)]
mod test;

#[doc(hidden)]
#[deprecated(since = "1.8.0", note = "use the hazmat module instead")]
pub mod guts;

pub mod hazmat;

/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
pub mod platform;

// Platform-specific implementations of the compression function. These
// BLAKE3-specific cfg flags are set in build.rs.
#[cfg(blake3_avx2_rust)]
#[path = "rust_avx2.rs"]
mod avx2;
#[cfg(blake3_avx2_ffi)]
#[path = "ffi_avx2.rs"]
mod avx2;
#[cfg(blake3_avx512_ffi)]
#[path = "ffi_avx512.rs"]
mod avx512;
#[cfg(blake3_neon)]
#[path = "ffi_neon.rs"]
mod neon;
mod portable;
#[cfg(blake3_sse2_rust)]
#[path = "rust_sse2.rs"]
mod sse2;
#[cfg(blake3_sse2_ffi)]
#[path = "ffi_sse2.rs"]
mod sse2;
#[cfg(blake3_sse41_rust)]
#[path = "rust_sse41.rs"]
mod sse41;
#[cfg(blake3_sse41_ffi)]
#[path = "ffi_sse41.rs"]
mod sse41;

#[cfg(blake3_wasm32_simd)]
#[path = "wasm32_simd.rs"]
mod wasm32_simd;

#[cfg(feature = "traits-preview")]
pub mod traits;

mod io;
mod join;

use arrayref::{array_mut_ref, array_ref};
use arrayvec::{ArrayString, ArrayVec};
use core::cmp;
use core::fmt;
use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
#[cfg(feature = "zeroize")]
use zeroize::Zeroize;

/// The number of bytes in a [`Hash`](struct.Hash.html), 32.
pub const OUT_LEN: usize = 32;

/// The number of bytes in a key, 32.
pub const KEY_LEN: usize = 32;

/// The number of bytes in a block, 64.
///
/// You don't usually need to think about this number. One case where it matters is calling
/// [`OutputReader::fill`] in a loop, where using a `buf` argument that's a multiple of `BLOCK_LEN`
/// avoids repeating work.
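///
/// A minimal sketch of such a loop (the output bytes are simply discarded here):
///
/// ```
/// let mut output_reader = blake3::Hasher::new().finalize_xof();
/// // A buffer that's a multiple of BLOCK_LEN avoids repeating work.
/// let mut buf = [0u8; blake3::BLOCK_LEN];
/// for _ in 0..4 {
///     output_reader.fill(&mut buf);
///     // ...use buf...
/// }
/// ```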
pub const BLOCK_LEN: usize = 64;

/// The number of bytes in a chunk, 1024.
///
/// You don't usually need to think about this number, but it often comes up in benchmarks, because
/// the maximum degree of parallelism used by the implementation equals the number of chunks.
pub const CHUNK_LEN: usize = 1024;

const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64

// While iterating the compression function within a chunk, the CV is
// represented as words, to avoid doing two extra endianness conversions for
// each compression in the portable implementation. But the hash_many interface
// needs to hash both input bytes and parent nodes, so it's better for its
// output CVs to be represented as bytes.
type CVWords = [u32; 8];
type CVBytes = [u8; 32]; // little-endian

const IV: &CVWords = &[
    0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
];

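// Each row of MSG_SCHEDULE below is the previous row with the same
// permutation applied again, i.e. next[i] == prev[MSG_SCHEDULE[1][i]]. Row 0
// is the identity, and row 1 is the permutation itself.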
const MSG_SCHEDULE: [[usize; 16]; 7] = [
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
    [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
    [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
    [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
    [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
    [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
];

// These are the internal flags that we use to domain separate root/non-root,
// chunk/parent, and chunk beginning/middle/end. These get set in the low bits
// of the block flags word in the compression function, so their values start
// at 1 and go up.
const CHUNK_START: u8 = 1 << 0;
const CHUNK_END: u8 = 1 << 1;
const PARENT: u8 = 1 << 2;
const ROOT: u8 = 1 << 3;
const KEYED_HASH: u8 = 1 << 4;
const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
const DERIVE_KEY_MATERIAL: u8 = 1 << 6;

#[inline]
fn counter_low(counter: u64) -> u32 {
    counter as u32
}

#[inline]
fn counter_high(counter: u64) -> u32 {
    (counter >> 32) as u32
}

/// An output of the default size, 32 bytes, which provides constant-time
/// equality checking.
///
/// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides
/// [`from_bytes`] and [`as_bytes`] for explicit conversions between itself and
/// `[u8; 32]`. However, byte arrays and slices don't provide constant-time
/// equality checking, which is often a security requirement in software that
/// handles private data. `Hash` doesn't implement [`Deref`] or [`AsRef`], to
/// avoid situations where a type conversion happens implicitly and the
/// constant-time property is accidentally lost.
///
/// `Hash` provides the [`to_hex`] and [`from_hex`] methods for converting to
/// and from hexadecimal. It also implements [`Display`] and [`FromStr`].
///
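/// # Examples
///
/// A short round-trip through the explicit conversions described above:
///
/// ```
/// let hash = blake3::hash(b"foo");
/// let bytes: [u8; 32] = hash.into();
/// assert_eq!(blake3::Hash::from_bytes(bytes), hash);
/// ```
///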
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
/// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
/// [`as_bytes`]: #method.as_bytes
/// [`from_bytes`]: #method.from_bytes
/// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
/// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
/// [`to_hex`]: #method.to_hex
/// [`from_hex`]: #method.from_hex
/// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html
/// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[derive(Clone, Copy, Hash, Eq)]
pub struct Hash([u8; OUT_LEN]);

impl Hash {
    /// The raw bytes of the `Hash`. Note that byte arrays don't provide
    /// constant-time equality checking, so if you need to compare hashes,
    /// prefer the `Hash` type.
    #[inline]
    pub const fn as_bytes(&self) -> &[u8; OUT_LEN] {
        &self.0
    }

    /// Create a `Hash` from its raw bytes representation.
    pub const fn from_bytes(bytes: [u8; OUT_LEN]) -> Self {
        Self(bytes)
    }

    /// Create a `Hash` from its raw bytes representation as a slice.
    ///
    /// Returns an error if the slice is not exactly 32 bytes long.
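    ///
    /// A short round-trip example:
    ///
    /// ```
    /// let hash = blake3::hash(b"foo");
    /// let same = blake3::Hash::from_slice(hash.as_bytes()).unwrap();
    /// assert_eq!(hash, same);
    /// ```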
    pub fn from_slice(bytes: &[u8]) -> Result<Self, core::array::TryFromSliceError> {
        Ok(Self::from_bytes(bytes.try_into()?))
    }

    /// Encode a `Hash` in lowercase hexadecimal.
    ///
    /// The returned [`ArrayString`] is a fixed size and doesn't allocate memory
    /// on the heap. Note that [`ArrayString`] doesn't provide constant-time
    /// equality checking, so if you need to compare hashes, prefer the `Hash`
    /// type.
    ///
    /// [`ArrayString`]: https://docs.rs/arrayvec/0.5.1/arrayvec/struct.ArrayString.html
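    ///
    /// For example, the encoding is always 64 hex characters long:
    ///
    /// ```
    /// let hex = blake3::hash(b"foo").to_hex();
    /// assert_eq!(hex.len(), 64);
    /// ```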
    pub fn to_hex(&self) -> ArrayString<{ 2 * OUT_LEN }> {
        let mut s = ArrayString::new();
        let table = b"0123456789abcdef";
        for &b in self.0.iter() {
            s.push(table[(b >> 4) as usize] as char);
            s.push(table[(b & 0xf) as usize] as char);
        }
        s
    }

    /// Decode a `Hash` from hexadecimal. Both uppercase and lowercase ASCII
    /// bytes are supported.
    ///
    /// Any byte outside the ranges `'0'...'9'`, `'a'...'f'`, and `'A'...'F'`
    /// results in an error. An input length other than 64 also results in an
    /// error.
    ///
    /// Note that `Hash` also implements `FromStr`, so `Hash::from_hex("...")`
    /// is equivalent to `"...".parse()`.
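    ///
    /// A round-trip example with [`to_hex`](#method.to_hex):
    ///
    /// ```
    /// let hash = blake3::hash(b"foo");
    /// let hex = hash.to_hex();
    /// assert_eq!(blake3::Hash::from_hex(hex.as_str()).unwrap(), hash);
    /// ```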
    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, HexError> {
        fn hex_val(byte: u8) -> Result<u8, HexError> {
            match byte {
                b'A'..=b'F' => Ok(byte - b'A' + 10),
                b'a'..=b'f' => Ok(byte - b'a' + 10),
                b'0'..=b'9' => Ok(byte - b'0'),
                _ => Err(HexError(HexErrorInner::InvalidByte(byte))),
            }
        }
        let hex_bytes: &[u8] = hex.as_ref();
        if hex_bytes.len() != OUT_LEN * 2 {
            return Err(HexError(HexErrorInner::InvalidLen(hex_bytes.len())));
        }
        let mut hash_bytes: [u8; OUT_LEN] = [0; OUT_LEN];
        for i in 0..OUT_LEN {
            hash_bytes[i] = 16 * hex_val(hex_bytes[2 * i])? + hex_val(hex_bytes[2 * i + 1])?;
        }
        Ok(Hash::from(hash_bytes))
    }
}

impl From<[u8; OUT_LEN]> for Hash {
    #[inline]
    fn from(bytes: [u8; OUT_LEN]) -> Self {
        Self::from_bytes(bytes)
    }
}

impl From<Hash> for [u8; OUT_LEN] {
    #[inline]
    fn from(hash: Hash) -> Self {
        hash.0
    }
}

impl core::str::FromStr for Hash {
    type Err = HexError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Hash::from_hex(s)
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Hash {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self(bytes) = self;
        bytes.zeroize();
    }
}

/// This implementation is constant-time.
impl PartialEq for Hash {
    #[inline]
    fn eq(&self, other: &Hash) -> bool {
        constant_time_eq::constant_time_eq_32(&self.0, &other.0)
    }
}

/// This implementation is constant-time.
impl PartialEq<[u8; OUT_LEN]> for Hash {
    #[inline]
    fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
        constant_time_eq::constant_time_eq_32(&self.0, other)
    }
}

/// This implementation is constant-time if the target is 32 bytes long.
impl PartialEq<[u8]> for Hash {
    #[inline]
    fn eq(&self, other: &[u8]) -> bool {
        constant_time_eq::constant_time_eq(&self.0, other)
    }
}

impl fmt::Display for Hash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Formatting field as `&str` to reduce code size since the `Debug`
        // dynamic dispatch table for `&str` is likely needed elsewhere already,
        // but that for `ArrayString<[u8; 64]>` is not.
        let hex = self.to_hex();
        let hex: &str = hex.as_str();

        f.write_str(hex)
    }
}

impl fmt::Debug for Hash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Formatting field as `&str` to reduce code size since the `Debug`
        // dynamic dispatch table for `&str` is likely needed elsewhere already,
        // but that for `ArrayString<[u8; 64]>` is not.
        let hex = self.to_hex();
        let hex: &str = hex.as_str();

        f.debug_tuple("Hash").field(&hex).finish()
    }
}

/// The error type for [`Hash::from_hex`].
///
/// The `.to_string()` representation of this error currently distinguishes between bad length
/// errors and bad character errors. This is to help with logging and debugging, but it isn't a
/// stable API detail, and it may change at any time.
#[derive(Clone, Debug)]
pub struct HexError(HexErrorInner);

#[derive(Clone, Debug)]
enum HexErrorInner {
    InvalidByte(u8),
    InvalidLen(usize),
}

impl fmt::Display for HexError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.0 {
            HexErrorInner::InvalidByte(byte) => {
                if byte < 128 {
                    write!(f, "invalid hex character: {:?}", byte as char)
                } else {
                    write!(f, "invalid hex character: 0x{:x}", byte)
                }
            }
            HexErrorInner::InvalidLen(len) => {
                write!(f, "expected 64 hex bytes, received {}", len)
            }
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for HexError {}

// Each chunk or parent node can produce either a 32-byte chaining value or, by
// setting the ROOT flag, any number of final output bytes. The Output struct
// captures the state just prior to choosing between those two possibilities.
#[derive(Clone)]
struct Output {
    input_chaining_value: CVWords,
    block: [u8; 64],
    block_len: u8,
    counter: u64,
    flags: u8,
    platform: Platform,
}

impl Output {
    fn chaining_value(&self) -> CVBytes {
        let mut cv = self.input_chaining_value;
        self.platform.compress_in_place(
            &mut cv,
            &self.block,
            self.block_len,
            self.counter,
            self.flags,
        );
        platform::le_bytes_from_words_32(&cv)
    }

    fn root_hash(&self) -> Hash {
        debug_assert_eq!(self.counter, 0);
        let mut cv = self.input_chaining_value;
        self.platform
            .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
        Hash(platform::le_bytes_from_words_32(&cv))
    }

    fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
        self.platform.compress_xof(
            &self.input_chaining_value,
            &self.block,
            self.block_len,
            self.counter,
            self.flags | ROOT,
        )
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Output {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            input_chaining_value,
            block,
            block_len,
            counter,
            flags,
            platform: _,
        } = self;

        input_chaining_value.zeroize();
        block.zeroize();
        block_len.zeroize();
        counter.zeroize();
        flags.zeroize();
    }
}

#[derive(Clone)]
struct ChunkState {
    cv: CVWords,
    chunk_counter: u64,
    buf: [u8; BLOCK_LEN],
    buf_len: u8,
    blocks_compressed: u8,
    flags: u8,
    platform: Platform,
}

impl ChunkState {
    fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
        Self {
            cv: *key,
            chunk_counter,
            buf: [0; BLOCK_LEN],
            buf_len: 0,
            blocks_compressed: 0,
            flags,
            platform,
        }
    }

    fn count(&self) -> usize {
        BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
    }

    fn fill_buf(&mut self, input: &mut &[u8]) {
        let want = BLOCK_LEN - self.buf_len as usize;
        let take = cmp::min(want, input.len());
        self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
        self.buf_len += take as u8;
        *input = &input[take..];
    }

    fn start_flag(&self) -> u8 {
        if self.blocks_compressed == 0 {
            CHUNK_START
        } else {
            0
        }
    }

    // Try to avoid buffering as much as possible, by compressing directly from
    // the input slice when full blocks are available.
    fn update(&mut self, mut input: &[u8]) -> &mut Self {
        if self.buf_len > 0 {
            self.fill_buf(&mut input);
            if !input.is_empty() {
                debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
                let block_flags = self.flags | self.start_flag(); // borrowck
                self.platform.compress_in_place(
                    &mut self.cv,
                    &self.buf,
                    BLOCK_LEN as u8,
                    self.chunk_counter,
                    block_flags,
                );
                self.buf_len = 0;
                self.buf = [0; BLOCK_LEN];
                self.blocks_compressed += 1;
            }
        }

        while input.len() > BLOCK_LEN {
            debug_assert_eq!(self.buf_len, 0);
            let block_flags = self.flags | self.start_flag(); // borrowck
            self.platform.compress_in_place(
                &mut self.cv,
                array_ref!(input, 0, BLOCK_LEN),
                BLOCK_LEN as u8,
                self.chunk_counter,
                block_flags,
            );
            self.blocks_compressed += 1;
            input = &input[BLOCK_LEN..];
        }

        self.fill_buf(&mut input);
        debug_assert!(input.is_empty());
        debug_assert!(self.count() <= CHUNK_LEN);
        self
    }

    fn output(&self) -> Output {
        let block_flags = self.flags | self.start_flag() | CHUNK_END;
        Output {
            input_chaining_value: self.cv,
            block: self.buf,
            block_len: self.buf_len,
            counter: self.chunk_counter,
            flags: block_flags,
            platform: self.platform,
        }
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for ChunkState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ChunkState")
            .field("count", &self.count())
            .field("chunk_counter", &self.chunk_counter)
            .field("flags", &self.flags)
            .field("platform", &self.platform)
            .finish()
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for ChunkState {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            cv,
            chunk_counter,
            buf,
            buf_len,
            blocks_compressed,
            flags,
            platform: _,
        } = self;

        cv.zeroize();
        chunk_counter.zeroize();
        buf.zeroize();
        buf_len.zeroize();
        blocks_compressed.zeroize();
        flags.zeroize();
    }
}

// IMPLEMENTATION NOTE
// ===================
// The recursive function compress_subtree_wide(), implemented below, is the
// basis of high-performance BLAKE3. We use it both for all-at-once hashing,
// and for the incremental input with Hasher (though we have to be careful with
// subtree boundaries in the incremental case). compress_subtree_wide() applies
// several optimizations at the same time:
// - Multithreading with Rayon.
// - Parallel chunk hashing with SIMD.
// - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
//   maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
//   to benefit from larger inputs, because more levels of the tree can use
//   full-width SIMD vectors for parent hashing. Without parallel parent
//   hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.

/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub enum IncrementCounter {
    Yes,
    No,
}

impl IncrementCounter {
    #[inline]
    fn yes(&self) -> bool {
        match self {
            IncrementCounter::Yes => true,
            IncrementCounter::No => false,
        }
    }
}

// The largest power of two less than or equal to `n`, used in Hasher::update(). This is similar to
// left_subtree_len(n), but note that left_subtree_len(n) is strictly less than `n`.
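// For example, largest_power_of_two_leq(3) == 2, largest_power_of_two_leq(4) == 4,
// and largest_power_of_two_leq(5) == 4.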
fn largest_power_of_two_leq(n: usize) -> usize {
    ((n / 2) + 1).next_power_of_two()
}

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
// on a single thread. Write out the chunk chaining values and return the
// number of chunks hashed. These chunks are never the root and never empty;
// those cases use a different codepath.
fn compress_chunks_parallel(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    debug_assert!(!input.is_empty(), "empty chunks below the root");
    debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);

    let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
    let mut chunks_array = ArrayVec::<&[u8; CHUNK_LEN], MAX_SIMD_DEGREE>::new();
    for chunk in &mut chunks_exact {
        chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
    }
    platform.hash_many(
        &chunks_array,
        key,
        chunk_counter,
        IncrementCounter::Yes,
        flags,
        CHUNK_START,
        CHUNK_END,
        out,
    );

    // Hash the remaining partial chunk, if there is one. Note that the empty
    // chunk (meaning the empty message) is a different codepath.
    let chunks_so_far = chunks_array.len();
    if !chunks_exact.remainder().is_empty() {
        let counter = chunk_counter + chunks_so_far as u64;
        let mut chunk_state = ChunkState::new(key, counter, flags, platform);
        chunk_state.update(chunks_exact.remainder());
        *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
            chunk_state.output().chaining_value();
        chunks_so_far + 1
    } else {
        chunks_so_far
    }
}

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
// on a single thread. Write out the parent chaining values and return the
// number of parents hashed. (If there's an odd input chaining value left over,
// return it as an additional output.) These parents are never the root and
// never empty; those cases use a different codepath.
fn compress_parents_parallel(
    child_chaining_values: &[u8],
    key: &CVWords,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
    let num_children = child_chaining_values.len() / OUT_LEN;
    debug_assert!(num_children >= 2, "not enough children");
    debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");

    let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
    // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
    // the requirements of compress_subtree_wide().
    let mut parents_array = ArrayVec::<&[u8; BLOCK_LEN], MAX_SIMD_DEGREE_OR_2>::new();
    for parent in &mut parents_exact {
        parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
    }
    platform.hash_many(
        &parents_array,
        key,
        0, // Parents always use counter 0.
        IncrementCounter::No,
        flags | PARENT,
        0, // Parents have no start flags.
        0, // Parents have no end flags.
        out,
    );

    // If there's an odd child left over, it becomes an output.
    let parents_so_far = parents_array.len();
    if !parents_exact.remainder().is_empty() {
        out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
        parents_so_far + 1
    } else {
        parents_so_far
    }
}

// The wide helper function writes out an array of chaining values and returns
// the length of that array. The number of chaining values returned is the
// dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer, if the
// input is shorter than that many chunks. The reason for maintaining a wide
// array of chaining values going back up the tree is to allow the
// implementation to hash as many parents in parallel as possible.
//
// As a special case when the SIMD degree is 1, this function will still return
// at least 2 outputs. This guarantees that this function doesn't perform the
// root compression. (If it did, it would use the wrong flags, and also we
// wouldn't be able to implement extendable output.) Note that this function is
// not used when the whole input is only 1 chunk long; that's a different
// codepath.
//
// Why not just have the caller split the input on the first update(), instead
// of implementing this special rule? Because we don't want to limit SIMD or
// multithreading parallelism for that update().
fn compress_subtree_wide<J: join::Join>(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
    out: &mut [u8],
) -> usize {
    // Note that the single chunk case does *not* bump the SIMD degree up to 2
    // when it is 1. This allows Rayon the option of multithreading even the
    // 2-chunk case, which can help performance on smaller platforms.
    if input.len() <= platform.simd_degree() * CHUNK_LEN {
        return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
    }

    // With more than simd_degree chunks, we need to recurse. Start by dividing
    // the input into left and right subtrees. (Note that this is only optimal
    // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
    // of 3 or something, we'll need a more complicated strategy.)
    debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
    let (left, right) = input.split_at(hazmat::left_subtree_len(input.len() as u64) as usize);
    let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;

    // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
    // account for the special case of returning 2 outputs when the SIMD degree
    // is 1.
    let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
    let degree = if left.len() == CHUNK_LEN {
        // The "simd_degree=1 and we're at the leaf nodes" case.
        debug_assert_eq!(platform.simd_degree(), 1);
        1
    } else {
        cmp::max(platform.simd_degree(), 2)
    };
    let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);

    // Recurse! For update_rayon(), this is where we take advantage of RayonJoin and use multiple
    // threads.
    let (left_n, right_n) = J::join(
        || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
        || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
    );

    // The special case again. If simd_degree=1, then we'll have left_n=1 and
    // right_n=1. Rather than compressing them into a single output, return
    // them directly, to make sure we always have at least two outputs.
    debug_assert_eq!(left_n, degree);
    debug_assert!(right_n >= 1 && right_n <= left_n);
    if left_n == 1 {
        out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
        return 2;
    }

    // Otherwise, do one layer of parent node compression.
    let num_children = left_n + right_n;
    compress_parents_parallel(
        &cv_array[..num_children * OUT_LEN],
        key,
        flags,
        platform,
        out,
    )
}

// Hash a subtree with compress_subtree_wide(), and then condense the resulting
// list of chaining values down to a single parent node. Don't compress that
// last parent node, however. Instead, return its message bytes (the
// concatenated chaining values of its children). This is necessary when the
// first call to update() supplies a complete subtree, because the topmost
// parent node of that subtree could end up being the root. It's also necessary
// for extended output in the general case.
//
// As with compress_subtree_wide(), this function is not used on inputs of 1
// chunk or less. That's a different codepath.
fn compress_subtree_to_parent_node<J: join::Join>(
    input: &[u8],
    key: &CVWords,
    chunk_counter: u64,
    flags: u8,
    platform: Platform,
) -> [u8; BLOCK_LEN] {
    debug_assert!(input.len() > CHUNK_LEN);
    let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
    let mut num_cvs =
        compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
    debug_assert!(num_cvs >= 2);

    // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
    // compress_subtree_wide() returns more than 2 chaining values. Condense
    // them into 2 by forming parent nodes repeatedly.
    let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
    while num_cvs > 2 {
        let cv_slice = &cv_array[..num_cvs * OUT_LEN];
        num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
        cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
    }
    *array_ref!(cv_array, 0, 2 * OUT_LEN)
}

// Hash a complete input all at once. Unlike compress_subtree_wide() and
// compress_subtree_to_parent_node(), this function handles the 1 chunk case.
fn hash_all_at_once<J: join::Join>(input: &[u8], key: &CVWords, flags: u8) -> Output {
    let platform = Platform::detect();

    // If the whole subtree is one chunk, hash it directly with a ChunkState.
    if input.len() <= CHUNK_LEN {
        return ChunkState::new(key, 0, flags, platform)
            .update(input)
            .output();
    }

    // Otherwise construct an Output object from the parent node returned by
    // compress_subtree_to_parent_node().
    Output {
        input_chaining_value: *key,
        block: compress_subtree_to_parent_node::<J>(input, key, 0, flags, platform),
        block_len: BLOCK_LEN as u8,
        counter: 0,
        flags: flags | PARENT,
        platform,
    }
}

/// The default hash function.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
///
/// ```
/// let hash = blake3::hash(b"foo");
/// # let hash1 = hash;
///
/// let hash = blake3::Hasher::new().update(b"foo").finalize();
/// # let hash2 = hash;
/// # assert_eq!(hash1, hash2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`] and
/// [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
pub fn hash(input: &[u8]) -> Hash {
    hash_all_at_once::<join::SerialJoin>(input, IV, 0).root_hash()
}

/// The keyed hash function.
///
/// This is suitable for use as a message authentication code, for example to
/// replace an HMAC instance. In that use case, the constant-time equality
/// checking provided by [`Hash`](struct.Hash.html) is almost always a security
/// requirement, and callers need to be careful not to compare MACs as raw
/// bytes.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new_keyed`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
///
/// ```
/// # const KEY: &[u8; 32] = &[0; 32];
/// let mac = blake3::keyed_hash(KEY, b"foo");
/// # let mac1 = mac;
///
/// let mac = blake3::Hasher::new_keyed(KEY).update(b"foo").finalize();
/// # let mac2 = mac;
/// # assert_eq!(mac1, mac2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
    let key_words = platform::words_from_le_bytes_32(key);
    hash_all_at_once::<join::SerialJoin>(input, &key_words, KEYED_HASH).root_hash()
}

/// The key derivation function.
///
/// Given cryptographic key material of any length and a context string of any
/// length, this function outputs a 32-byte derived subkey. **The context string
/// should be hardcoded, globally unique, and application-specific.** A good
/// default format for such strings is `"[application] [commit timestamp]
/// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
///
/// Key derivation is important when you want to use the same key in multiple
/// algorithms or use cases. Using the same key with different cryptographic
/// algorithms is generally forbidden, and deriving a separate subkey for each
/// use case protects you from bad interactions. Derived keys also mitigate the
/// damage from one part of your application accidentally leaking its key.
///
/// As a rare exception to that general rule, however, it is possible to use
/// `derive_key` itself with key material that you are already using with
/// another algorithm. You might need to do this if you're adding features to
/// an existing application, which does not yet use key derivation internally.
/// However, you still must not share key material with algorithms that forbid
/// key reuse entirely, like a one-time pad. For more on this, see sections 6.2
/// and 7.8 of the [BLAKE3 paper](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf).
///
/// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
/// used with passwords.** Instead, use a dedicated password hash like
/// [Argon2]. Password hashes are entirely different from generic hash
/// functions, with opposite design requirements.
///
/// For an incremental version that accepts multiple writes, see [`Hasher::new_derive_key`],
/// [`Hasher::update`], and [`Hasher::finalize`]. These two statements are equivalent:
///
/// ```
/// # const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
/// let key = blake3::derive_key(CONTEXT, b"key material, not a password");
/// # let key1 = key;
///
/// let key: [u8; 32] = blake3::Hasher::new_derive_key(CONTEXT)
///     .update(b"key material, not a password")
///     .finalize()
///     .into();
/// # let key2 = key;
/// # assert_eq!(key1, key2);
/// ```
///
/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
///
/// This function is always single-threaded. For multithreading support, see
/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
///
/// [Argon2]: https://en.wikipedia.org/wiki/Argon2
pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; OUT_LEN] {
    let context_key = hazmat::hash_derive_key_context(context);
    let context_key_words = platform::words_from_le_bytes_32(&context_key);
    hash_all_at_once::<join::SerialJoin>(key_material, &context_key_words, DERIVE_KEY_MATERIAL)
        .root_hash()
        .0
}

fn parent_node_output(
    left_child: &CVBytes,
    right_child: &CVBytes,
    key: &CVWords,
    flags: u8,
    platform: Platform,
) -> Output {
    let mut block = [0; BLOCK_LEN];
    block[..32].copy_from_slice(left_child);
    block[32..].copy_from_slice(right_child);
    Output {
        input_chaining_value: *key,
        block,
        block_len: BLOCK_LEN as u8,
        counter: 0,
        flags: flags | PARENT,
        platform,
    }
}

/// An incremental hash state that can accept any number of writes.
///
/// The `rayon` and `mmap` Cargo features enable additional methods on this
/// type related to multithreading and memory-mapped IO.
///
/// When the `traits-preview` Cargo feature is enabled, this type implements
/// several commonly used traits from the
/// [`digest`](https://crates.io/crates/digest) crate. However, those
/// traits aren't stable, and they're expected to change in incompatible ways
/// before that crate reaches 1.0. For that reason, this crate makes no SemVer
/// guarantees for this feature, and callers who use it should expect breaking
/// changes between patch versions.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// // Hash an input incrementally.
/// let mut hasher = blake3::Hasher::new();
/// hasher.update(b"foo");
/// hasher.update(b"bar");
/// hasher.update(b"baz");
/// assert_eq!(hasher.finalize(), blake3::hash(b"foobarbaz"));
///
/// // Extended output. OutputReader also implements Read and Seek.
/// # #[cfg(feature = "std")] {
/// let mut output = [0; 1000];
/// let mut output_reader = hasher.finalize_xof();
/// output_reader.fill(&mut output);
/// assert_eq!(&output[..32], blake3::hash(b"foobarbaz").as_bytes());
/// # }
/// # Ok(())
/// # }
/// ```
#[derive(Clone)]
pub struct Hasher {
    key: CVWords,
    chunk_state: ChunkState,
    initial_chunk_counter: u64,
    // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
    // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
    // requires a 4th entry, rather than merging everything down to 1, because
    // we don't know whether more input is coming. This is different from how
    // the reference implementation does things.
    cv_stack: ArrayVec<CVBytes, { MAX_DEPTH + 1 }>,
}

impl Hasher {
    fn new_internal(key: &CVWords, flags: u8) -> Self {
        Self {
            key: *key,
            chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
            initial_chunk_counter: 0,
            cv_stack: ArrayVec::new(),
        }
    }

    /// Construct a new `Hasher` for the regular hash function.
    pub fn new() -> Self {
        Self::new_internal(IV, 0)
    }

    /// Construct a new `Hasher` for the keyed hash function. See
    /// [`keyed_hash`].
    ///
    /// [`keyed_hash`]: fn.keyed_hash.html
    pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
        let key_words = platform::words_from_le_bytes_32(key);
        Self::new_internal(&key_words, KEYED_HASH)
    }

    /// Construct a new `Hasher` for the key derivation function. See
    /// [`derive_key`]. The context string should be hardcoded, globally
    /// unique, and application-specific.
    ///
    /// [`derive_key`]: fn.derive_key.html
    pub fn new_derive_key(context: &str) -> Self {
        let context_key = hazmat::hash_derive_key_context(context);
        let context_key_words = platform::words_from_le_bytes_32(&context_key);
        Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
    }

    /// Reset the `Hasher` to its initial state.
    ///
    /// This is functionally the same as overwriting the `Hasher` with a new
    /// one, using the same key or context string if any.
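    ///
    /// For example, resetting produces the same result as starting over with
    /// a fresh `Hasher`:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// hasher.reset();
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"bar"));
    /// ```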
    pub fn reset(&mut self) -> &mut Self {
        self.chunk_state = ChunkState::new(
            &self.key,
            0,
            self.chunk_state.flags,
            self.chunk_state.platform,
        );
        self.cv_stack.clear();
        self
    }

    // As described in push_cv() below, we do "lazy merging", delaying merges
    // until right before the next CV is about to be added. This is different
    // from the reference implementation. Another difference is that we aren't
    // always merging 1 chunk at a time. Instead, each CV might represent any
    // power-of-two number of chunks, as long as the smaller-above-larger stack
    // order is maintained. Instead of the "count the trailing 0-bits"
    // algorithm described in the spec (which assumes you're adding one chunk
    // at a time), we use a "count the total number of 1-bits" variant (which
    // doesn't assume that). The principle is the same: each CV that should
    // remain in the stack is represented by a 1-bit in the total number of
    // chunks (or bytes) so far.
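    //
    // For example, once 6 chunks (0b110) have been added and merged, the
    // stack holds two CVs, one for the first 4 chunks and one for the next 2,
    // matching the two 1-bits in the chunk count.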
    fn merge_cv_stack(&mut self, chunk_counter: u64) {
        // Account for non-zero cases of Hasher::set_input_offset, where there are no prior
        // subtrees in the stack. Note that initial_chunk_counter is always 0 for callers who don't
        // use the hazmat module.
        let post_merge_stack_len =
            (chunk_counter - self.initial_chunk_counter).count_ones() as usize;
        while self.cv_stack.len() > post_merge_stack_len {
            let right_child = self.cv_stack.pop().unwrap();
            let left_child = self.cv_stack.pop().unwrap();
            let parent_output = parent_node_output(
                &left_child,
                &right_child,
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            self.cv_stack.push(parent_output.chaining_value());
        }
    }

    // In reference_impl.rs, we merge the new CV with existing CVs from the
    // stack before pushing it. We can do that because we know more input is
    // coming, so we know none of the merges are root.
    //
    // This setting is different. We want to feed as much input as possible to
    // compress_subtree_wide(), without setting aside anything for the
    // chunk_state. If the user gives us 64 KiB, we want to parallelize over
    // all 64 KiB at once as a single subtree, if at all possible.
    //
    // This leads to two problems:
    // 1) This 64 KiB input might be the only call that ever gets made to
    //    update. In this case, the root node of the 64 KiB subtree would be
    //    the root node of the whole tree, and it would need to be ROOT
    //    finalized. We can't compress it until we know.
    // 2) This 64 KiB input might complete a larger tree, whose root node is
    //    similarly going to be the root of the whole tree. For example,
    //    maybe we have 192 KiB (that is, 128 + 64) hashed so far. We can't
    //    compress the node at the root of the 256 KiB subtree until we know
    //    how to finalize it.
    //
    // The second problem is solved with "lazy merging". That is, when we're
    // about to add a CV to the stack, we don't merge it with anything first,
    // as the reference impl does. Instead we do merges using the *previous* CV
    // that was added, which is sitting on top of the stack, and we put the new
    // CV (unmerged) on top of the stack afterwards. This guarantees that we
    // never merge the root node until finalize().
    //
    // Solving the first problem requires an additional tool,
    // compress_subtree_to_parent_node(). That function always returns the top
    // *two* chaining values of the subtree it's compressing. We then do lazy
    // merging with each of them separately, so that the second CV will always
    // remain unmerged. (That also helps us support extendable output when
    // we're hashing an input all-at-once.)
    fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
        self.merge_cv_stack(chunk_counter);
        self.cv_stack.push(*new_cv);
    }

    /// Add input bytes to the hash state. You can call this any number of times.
    ///
    /// This method is always single-threaded. For multithreading support, see
    /// [`update_rayon`](#method.update_rayon) (enabled with the `rayon` Cargo feature).
    ///
    /// Note that the degree of SIMD parallelism that `update` can use is limited by the size of
    /// this input buffer. See [`update_reader`](#method.update_reader).
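    ///
    /// As a sketch of that effect, a single large `update` call can hash many
    /// chunks in parallel, while many small calls cannot, although both
    /// produce the same hash:
    ///
    /// ```
    /// let buf = [0u8; 65536];
    /// let one_call = blake3::Hasher::new().update(&buf).finalize();
    /// let mut hasher = blake3::Hasher::new();
    /// for small in buf.chunks(64) {
    ///     hasher.update(small);
    /// }
    /// assert_eq!(one_call, hasher.finalize());
    /// ```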
1198    pub fn update(&mut self, input: &[u8]) -> &mut Self {
1199        self.update_with_join::<join::SerialJoin>(input)
1200    }
1201
1202    fn update_with_join<J: join::Join>(&mut self, mut input: &[u8]) -> &mut Self {
1203        let input_offset = self.initial_chunk_counter * CHUNK_LEN as u64;
1204        if let Some(max) = hazmat::max_subtree_len(input_offset) {
1205            let remaining = max - self.count();
1206            assert!(
1207                input.len() as u64 <= remaining,
1208                "the subtree starting at {} contains at most {} bytes (found {})",
1209                CHUNK_LEN as u64 * self.initial_chunk_counter,
1210                max,
1211                input.len(),
1212            );
1213        }
1214        // If we have some partial chunk bytes in the internal chunk_state, we
1215        // need to finish that chunk first.
1216        if self.chunk_state.count() > 0 {
1217            let want = CHUNK_LEN - self.chunk_state.count();
1218            let take = cmp::min(want, input.len());
1219            self.chunk_state.update(&input[..take]);
1220            input = &input[take..];
1221            if !input.is_empty() {
1222                // We've filled the current chunk, and there's more input
1223                // coming, so we know it's not the root and we can finalize it.
1224                // Then we'll proceed to hashing whole chunks below.
1225                debug_assert_eq!(self.chunk_state.count(), CHUNK_LEN);
1226                let chunk_cv = self.chunk_state.output().chaining_value();
1227                self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
1228                self.chunk_state = ChunkState::new(
1229                    &self.key,
1230                    self.chunk_state.chunk_counter + 1,
1231                    self.chunk_state.flags,
1232                    self.chunk_state.platform,
1233                );
1234            } else {
1235                return self;
1236            }
1237        }
1238
1239        // Now the chunk_state is clear, and we have more input. If there's
1240        // more than a single chunk (so, definitely not the root chunk), hash
1241        // the largest whole subtree we can, with the full benefits of SIMD and
1242        // multithreading parallelism. Two restrictions:
1243        // - The subtree has to be a power-of-2 number of chunks. Only subtrees
1244        //   along the right edge can be incomplete, and we don't know where
1245        //   the right edge is going to be until we get to finalize().
1246        // - The subtree must evenly divide the total number of chunks up until
1247        //   this point (if total is not 0). If the current incomplete subtree
1248        //   is only waiting for 1 more chunk, we can't hash a subtree of 4
1249        //   chunks. We have to complete the current subtree first.
1250        // Because we might need to break up the input to form powers of 2, or
1251        // to evenly divide what we already have, this part runs in a loop.
1252        while input.len() > CHUNK_LEN {
1253            debug_assert_eq!(self.chunk_state.count(), 0, "no partial chunk data");
1254            debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
1255            let mut subtree_len = largest_power_of_two_leq(input.len());
1256            let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
1257            // Shrink the subtree_len until it evenly divides the count so far.
1258            // We know that subtree_len itself is a power of 2, so we can use a
1259            // bitmasking trick instead of an actual remainder operation. (Note
1260            // that if the caller consistently passes power-of-2 inputs of the
1261            // same size, as is hopefully typical, this loop condition will
1262            // always fail, and subtree_len will always be the full length of
1263            // the input.)
1264            //
1265            // An aside: We don't have to shrink subtree_len quite this much.
1266            // For example, if count_so_far is 1, we could pass 2 chunks to
1267            // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
1268            // we'll still get the right answer in the end, and we might get to
1269            // use 2-way SIMD parallelism. The problem with this optimization,
1270            // is that it gets us stuck always hashing 2 chunks. The total
1271            // number of chunks will remain odd, and we'll never graduate to
1272            // higher degrees of parallelism. See
1273            // https://github.com/BLAKE3-team/BLAKE3/issues/69.
1274            while (subtree_len - 1) as u64 & count_so_far != 0 {
1275                subtree_len /= 2;
1276            }
1277            // The shrunken subtree_len might now be 1 chunk long. If so, hash
1278            // that one chunk by itself. Otherwise, compress the subtree into a
1279            // pair of CVs.
1280            let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
1281            if subtree_len <= CHUNK_LEN {
1282                debug_assert_eq!(subtree_len, CHUNK_LEN);
1283                self.push_cv(
1284                    &ChunkState::new(
1285                        &self.key,
1286                        self.chunk_state.chunk_counter,
1287                        self.chunk_state.flags,
1288                        self.chunk_state.platform,
1289                    )
1290                    .update(&input[..subtree_len])
1291                    .output()
1292                    .chaining_value(),
1293                    self.chunk_state.chunk_counter,
1294                );
1295            } else {
1296                // This is the high-performance happy path, though getting here
1297                // depends on the caller giving us a long enough input.
1298                let cv_pair = compress_subtree_to_parent_node::<J>(
1299                    &input[..subtree_len],
1300                    &self.key,
1301                    self.chunk_state.chunk_counter,
1302                    self.chunk_state.flags,
1303                    self.chunk_state.platform,
1304                );
1305                let left_cv = array_ref!(cv_pair, 0, 32);
1306                let right_cv = array_ref!(cv_pair, 32, 32);
1307                // Push the two CVs we received into the CV stack in order. Because
1308                // the stack merges lazily, this guarantees we aren't merging the
1309                // root.
1310                self.push_cv(left_cv, self.chunk_state.chunk_counter);
1311                self.push_cv(
1312                    right_cv,
1313                    self.chunk_state.chunk_counter + (subtree_chunks / 2),
1314                );
1315            }
1316            self.chunk_state.chunk_counter += subtree_chunks;
1317            input = &input[subtree_len..];
1318        }
1319
1320        // What remains is 1 chunk or less. Add it to the chunk state.
1321        debug_assert!(input.len() <= CHUNK_LEN);
1322        if !input.is_empty() {
1323            self.chunk_state.update(input);
1324            // Having added some input to the chunk_state, we know what's in
1325            // the CV stack won't become the root node, and we can do an extra
1326            // merge. This simplifies finalize().
1327            self.merge_cv_stack(self.chunk_state.chunk_counter);
1328        }
1329
1330        self
1331    }

    fn final_output(&self) -> Output {
        // If the current chunk is the only chunk, that makes it the root node
        // also. Convert it directly into an Output. Otherwise, we need to
        // merge subtrees below.
        if self.cv_stack.is_empty() {
            debug_assert_eq!(self.chunk_state.chunk_counter, self.initial_chunk_counter);
            return self.chunk_state.output();
        }

        // If there are any bytes in the ChunkState, finalize that chunk and
        // merge its CV with everything in the CV stack. In that case, the work
        // we did at the end of update() above guarantees that the stack
        // doesn't contain any unmerged subtrees that need to be merged first.
        // (This is important, because if there were two chunk hashes sitting
        // on top of the stack, they would need to merge with each other, and
        // merging a new chunk hash into them would be incorrect.)
        //
        // If there are no bytes in the ChunkState, we'll merge what's already
        // in the stack. In this case it's fine if there are unmerged chunks on
        // top, because we'll merge them with each other. Note that the case of
        // the empty chunk is taken care of above.
        let mut output: Output;
        let mut num_cvs_remaining = self.cv_stack.len();
        if self.chunk_state.count() > 0 {
            debug_assert_eq!(
                self.cv_stack.len(),
                (self.chunk_state.chunk_counter - self.initial_chunk_counter).count_ones() as usize,
                "cv stack does not need a merge",
            );
            output = self.chunk_state.output();
        } else {
            debug_assert!(self.cv_stack.len() >= 2);
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 2],
                &self.cv_stack[num_cvs_remaining - 1],
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 2;
        }
        while num_cvs_remaining > 0 {
            output = parent_node_output(
                &self.cv_stack[num_cvs_remaining - 1],
                &output.chaining_value(),
                &self.key,
                self.chunk_state.flags,
                self.chunk_state.platform,
            );
            num_cvs_remaining -= 1;
        }
        output
    }

    /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
    /// the input.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
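    ///
    /// # Example
    ///
    /// A quick sketch of the properties described above:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// let hash1 = hasher.finalize();
    /// assert_eq!(hash1, hasher.finalize()); // idempotent
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"foobar"));
    /// ```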
    pub fn finalize(&self) -> Hash {
        assert_eq!(
            self.initial_chunk_counter, 0,
            "set_input_offset must be used with finalize_non_root",
        );
        self.final_output().root_hash()
    }

    /// Finalize the hash state and return an [`OutputReader`], which can
    /// supply any number of output bytes.
    ///
    /// This method is idempotent. Calling it twice will give the same result.
    /// You can also add more input and finalize again.
    ///
    /// [`OutputReader`]: struct.OutputReader.html
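    ///
    /// For example (a quick sketch): the first 32 bytes of the extended output
    /// always match [`finalize`](#method.finalize).
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"abc");
    /// let mut buf = [0u8; 32];
    /// hasher.finalize_xof().fill(&mut buf);
    /// assert_eq!(hasher.finalize(), buf);
    /// ```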
    pub fn finalize_xof(&self) -> OutputReader {
        assert_eq!(
            self.initial_chunk_counter, 0,
            "set_input_offset must be used with finalize_non_root",
        );
        OutputReader::new(self.final_output())
    }

    /// Return the total number of bytes hashed so far.
    ///
    /// [`hazmat::HasherExt::set_input_offset`] does not affect this value. This only counts bytes
    /// passed to [`update`](Hasher::update).
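    ///
    /// For example (a quick sketch):
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// assert_eq!(hasher.count(), 0);
    /// hasher.update(b"hello");
    /// assert_eq!(hasher.count(), 5);
    /// ```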
    pub fn count(&self) -> u64 {
        // Account for non-zero cases of hazmat::HasherExt::set_input_offset. Note that
        // initial_chunk_counter is always 0 for callers who don't use the hazmat module.
        (self.chunk_state.chunk_counter - self.initial_chunk_counter) * CHUNK_LEN as u64
            + self.chunk_state.count() as u64
    }

    /// As [`update`](Hasher::update), but reading from a
    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) implementation.
    ///
    /// [`Hasher`] implements
    /// [`std::io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html), so it's possible to
    /// use [`std::io::copy`](https://doc.rust-lang.org/std/io/fn.copy.html) to update a [`Hasher`]
    /// from any reader. Unfortunately, this standard approach can limit performance, because
    /// `copy` currently uses an internal 8 KiB buffer that isn't big enough to take advantage of
    /// all SIMD instruction sets. (In particular, [AVX-512](https://en.wikipedia.org/wiki/AVX-512)
    /// needs a 16 KiB buffer.) `update_reader` avoids this performance problem and is slightly
    /// more convenient.
    ///
    /// The internal buffer size this method uses may change at any time, and it may be different
    /// for different targets. The only guarantee is that it will be large enough for all of this
    /// crate's SIMD implementations on the current platform.
    ///
    /// The most common implementer of
    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) might be
    /// [`std::fs::File`](https://doc.rust-lang.org/std/fs/struct.File.html), but note that memory
    /// mapping can be faster than this method for hashing large files. See
    /// [`update_mmap`](Hasher::update_mmap) and [`update_mmap_rayon`](Hasher::update_mmap_rayon),
    /// which require the `mmap` and (for the latter) `rayon` Cargo features.
    ///
    /// This method requires the `std` Cargo feature, which is enabled by default.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # fn main() -> io::Result<()> {
    /// // Hash standard input.
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_reader(std::io::stdin().lock())?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "std")]
    pub fn update_reader(&mut self, reader: impl std::io::Read) -> std::io::Result<&mut Self> {
        io::copy_wide(reader, self)?;
        Ok(self)
    }

    /// As [`update`](Hasher::update), but using Rayon-based multithreading
    /// internally.
    ///
    /// This method is gated by the `rayon` Cargo feature, which is disabled by
    /// default but enabled on [docs.rs](https://docs.rs).
    ///
    /// To get any performance benefit from multithreading, the input buffer
    /// needs to be large. As a rule of thumb on x86_64, `update_rayon` is
    /// _slower_ than `update` for inputs under 128 KiB. That threshold varies
    /// quite a lot across different processors, and it's important to benchmark
    /// your specific use case. See also the performance warning associated with
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon).
    ///
    /// If you already have a large buffer in memory, and you want to hash it
    /// with multiple threads, this method is a good option. However, reading a
    /// file into memory just to call this method can be a performance mistake,
    /// both because it requires lots of memory and because single-threaded
    /// reads can be slow. For hashing whole files, see
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon), which is gated by both
    /// the `rayon` and `mmap` Cargo features.
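    ///
    /// # Example
    ///
    /// A quick sketch, assuming the `rayon` feature is enabled; the result is
    /// identical to single-threaded [`update`](Hasher::update):
    ///
    /// ```
    /// # #[cfg(feature = "rayon")]
    /// # {
    /// let big_buf = vec![0u8; 1_000_000];
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_rayon(&big_buf);
    /// assert_eq!(hasher.finalize(), blake3::hash(&big_buf));
    /// # }
    /// ```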
    #[cfg(feature = "rayon")]
    pub fn update_rayon(&mut self, input: &[u8]) -> &mut Self {
        self.update_with_join::<join::RayonJoin>(input)
    }

    /// As [`update`](Hasher::update), but reading the contents of a file using memory mapping.
    ///
    /// Not all files can be memory mapped, and memory mapping small files can be slower than
    /// reading them the usual way. In those cases, this method will fall back to standard file IO.
    /// The heuristic for whether to use memory mapping is currently very simple (file size >=
    /// 16 KiB), and it might change at any time.
    ///
    /// Like [`update`](Hasher::update), this method is single-threaded. In this author's
    /// experience, memory mapping improves single-threaded performance by ~10% for large files
    /// that are already in cache. This probably varies between platforms, and as always it's a
    /// good idea to benchmark your own use case. In comparison, the multithreaded
    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon) method can have a much larger impact on
    /// performance.
    ///
    /// There's a correctness reason that this method takes
    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) instead of
    /// [`File`](https://doc.rust-lang.org/std/fs/struct.File.html): reading from a memory-mapped
    /// file ignores the seek position of the original file handle (it neither respects the current
    /// position nor updates the position). This difference in behavior would've caused
    /// `update_mmap` and [`update_reader`](Hasher::update_reader) to give different answers and
    /// have different side effects in some cases. Taking a
    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) avoids this problem by
    /// making it clear that a new [`File`](https://doc.rust-lang.org/std/fs/struct.File.html) is
    /// opened internally.
    ///
    /// This method requires the `mmap` Cargo feature, which is disabled by default but enabled on
    /// [docs.rs](https://docs.rs).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # use std::path::Path;
    /// # fn main() -> io::Result<()> {
    /// let path = Path::new("file.dat");
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_mmap(path)?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "mmap")]
    pub fn update_mmap(&mut self, path: impl AsRef<std::path::Path>) -> std::io::Result<&mut Self> {
        let file = std::fs::File::open(path.as_ref())?;
        if let Some(mmap) = io::maybe_mmap_file(&file)? {
            self.update(&mmap);
        } else {
            io::copy_wide(&file, self)?;
        }
        Ok(self)
    }

    /// As [`update_rayon`](Hasher::update_rayon), but reading the contents of a file using
    /// memory mapping. This is the default behavior of `b3sum`.
    ///
    /// For large files that are likely to be in cache, this can be much faster than
    /// single-threaded hashing. When benchmarks report that BLAKE3 is 10x or 20x faster than other
    /// cryptographic hashes, this is usually what they're measuring. However...
    ///
    /// **Performance Warning:** There are cases where multithreading hurts performance. The worst
    /// case is [a large file on a spinning disk](https://github.com/BLAKE3-team/BLAKE3/issues/31),
    /// where simultaneous reads from multiple threads can cause "thrashing" (i.e. the disk spends
    /// more time seeking around than reading data). Windows tends to be somewhat worse about this,
    /// in part because it's less likely than Linux to keep very large files in cache. More
    /// generally, if your CPU cores are already busy, then multithreading will add overhead
    /// without improving performance. If your code runs in different environments that you don't
    /// control and can't measure, then unfortunately there's no one-size-fits-all answer for
    /// whether multithreading is a good idea.
    ///
    /// The memory mapping behavior of this function is the same as
    /// [`update_mmap`](Hasher::update_mmap), and the heuristic for when to fall back to standard
    /// file IO might change at any time.
    ///
    /// This method requires both the `mmap` and `rayon` Cargo features, which are disabled by
    /// default but enabled on [docs.rs](https://docs.rs).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use std::io;
    /// # use std::path::Path;
    /// # fn main() -> io::Result<()> {
    /// # #[cfg(feature = "rayon")]
    /// # {
    /// let path = Path::new("big_file.dat");
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_mmap_rayon(path)?;
    /// println!("{}", hasher.finalize());
    /// # }
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "mmap")]
    #[cfg(feature = "rayon")]
    pub fn update_mmap_rayon(
        &mut self,
        path: impl AsRef<std::path::Path>,
    ) -> std::io::Result<&mut Self> {
        let file = std::fs::File::open(path.as_ref())?;
        if let Some(mmap) = io::maybe_mmap_file(&file)? {
            self.update_rayon(&mmap);
        } else {
            io::copy_wide(&file, self)?;
        }
        Ok(self)
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for Hasher {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Hasher")
            .field("flags", &self.chunk_state.flags)
            .field("platform", &self.chunk_state.platform)
            .finish()
    }
}

impl Default for Hasher {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "std")]
impl std::io::Write for Hasher {
    /// This is equivalent to [`update`](#method.update).
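    ///
    /// For example (a quick sketch using the `Write` trait):
    ///
    /// ```
    /// # #[cfg(feature = "std")] {
    /// use std::io::Write;
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.write_all(b"foobar").unwrap();
    /// assert_eq!(hasher.finalize(), blake3::hash(b"foobar"));
    /// # }
    /// ```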
    #[inline]
    fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
        self.update(input);
        Ok(input.len())
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for Hasher {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            key,
            chunk_state,
            initial_chunk_counter,
            cv_stack,
        } = self;

        key.zeroize();
        chunk_state.zeroize();
        initial_chunk_counter.zeroize();
        cv_stack.zeroize();
    }
}

/// An incremental reader for extended output, returned by
/// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
///
/// Shorter BLAKE3 outputs are prefixes of longer ones, and explicitly requesting a short output is
/// equivalent to truncating the default-length output. Note that this is a difference between
/// BLAKE2 and BLAKE3.
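///
/// For example (a quick sketch of the prefix property described above):
///
/// ```
/// let mut hasher = blake3::Hasher::new();
/// hasher.update(b"input");
/// let mut long_output = [0u8; 64];
/// hasher.finalize_xof().fill(&mut long_output);
/// // The default 32-byte hash is a prefix of the longer output.
/// assert_eq!(hasher.finalize(), long_output[..32]);
/// ```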
///
/// # Security notes
///
/// Outputs shorter than the default length of 32 bytes (256 bits) provide less security. An N-bit
/// BLAKE3 output is intended to provide N bits of first and second preimage resistance and N/2
/// bits of collision resistance, for any N up to 256. Longer outputs don't provide any additional
/// security.
///
/// Avoid relying on the secrecy of the output offset, that is, the number of output bytes read or
/// the arguments to [`seek`](struct.OutputReader.html#method.seek) or
/// [`set_position`](struct.OutputReader.html#method.set_position). [_Block-Cipher-Based Tree
/// Hashing_ by Aldo Gunsing](https://eprint.iacr.org/2022/283) shows that an attacker who knows
/// both the message and the key (if any) can easily determine the offset of an extended output.
/// For comparison, AES-CTR has a similar property: if you know the key, you can decrypt a block
/// from an unknown position in the output stream to recover its block index. Callers with strong
/// secret keys aren't affected in practice, but secret offsets are a [design
/// smell](https://en.wikipedia.org/wiki/Design_smell) in any case.
#[derive(Clone)]
pub struct OutputReader {
    inner: Output,
    position_within_block: u8,
}

impl OutputReader {
    fn new(inner: Output) -> Self {
        Self {
            inner,
            position_within_block: 0,
        }
    }

    // This helper function handles both the case where the output buffer is
    // shorter than one block, and the case where our position_within_block is
    // non-zero.
    fn fill_one_block(&mut self, buf: &mut &mut [u8]) {
        let output_block: [u8; BLOCK_LEN] = self.inner.root_output_block();
        let output_bytes = &output_block[self.position_within_block as usize..];
        let take = cmp::min(buf.len(), output_bytes.len());
        buf[..take].copy_from_slice(&output_bytes[..take]);
        self.position_within_block += take as u8;
        if self.position_within_block == BLOCK_LEN as u8 {
            self.inner.counter += 1;
            self.position_within_block = 0;
        }
        // Advance the dest buffer. mem::take() is a borrowck workaround.
        *buf = &mut core::mem::take(buf)[take..];
    }

    /// Fill a buffer with output bytes and advance the position of the
    /// `OutputReader`. This is equivalent to [`Read::read`], except that it
    /// doesn't return a `Result`. Both methods always fill the entire buffer.
    ///
    /// Note that `OutputReader` doesn't buffer output bytes internally, so
    /// calling `fill` repeatedly with a short-length or odd-length slice will
    /// end up performing the same compression multiple times. If you're
    /// reading output in a loop, prefer a slice length that's a multiple of
    /// [`BLOCK_LEN`] (64 bytes).
    ///
    /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
    /// to extract more than that, for example by seeking near the end and
    /// reading further, the behavior is unspecified.
    ///
    /// [`Read::read`]: #method.read
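    ///
    /// # Example
    ///
    /// A quick sketch of the block-sized loop recommended above:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().finalize_xof();
    /// let mut buf = [0u8; 64]; // one full block per call
    /// for _ in 0..4 {
    ///     reader.fill(&mut buf);
    /// }
    /// assert_eq!(reader.position(), 256);
    /// ```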
    pub fn fill(&mut self, mut buf: &mut [u8]) {
        if buf.is_empty() {
            return;
        }

        // If we're partway through a block, try to get to a block boundary.
        if self.position_within_block != 0 {
            self.fill_one_block(&mut buf);
        }

        let full_blocks = buf.len() / BLOCK_LEN;
        let full_blocks_len = full_blocks * BLOCK_LEN;
        if full_blocks > 0 {
            debug_assert_eq!(0, self.position_within_block);
            self.inner.platform.xof_many(
                &self.inner.input_chaining_value,
                &self.inner.block,
                self.inner.block_len,
                self.inner.counter,
                self.inner.flags | ROOT,
                &mut buf[..full_blocks_len],
            );
            self.inner.counter += full_blocks as u64;
            buf = &mut buf[full_blocks_len..];
        }

        if !buf.is_empty() {
            debug_assert!(buf.len() < BLOCK_LEN);
            self.fill_one_block(&mut buf);
            debug_assert!(buf.is_empty());
        }
    }

    /// Return the current read position in the output stream. This is
    /// equivalent to [`Seek::stream_position`], except that it doesn't return
    /// a `Result`. The position of a new `OutputReader` starts at 0, and each
    /// call to [`fill`] or [`Read::read`] moves the position forward by the
    /// number of bytes read.
    ///
    /// [`Seek::stream_position`]: #method.stream_position
    /// [`fill`]: #method.fill
    /// [`Read::read`]: #method.read
    pub fn position(&self) -> u64 {
        self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
    }

    /// Seek to a new read position in the output stream. This is equivalent to
    /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
    /// return a `Result`.
    ///
    /// [`Seek::seek`]: #method.seek
    /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
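    ///
    /// For example (a quick sketch): seeking forward and reading gives the
    /// same bytes as the corresponding range of a longer read from the start.
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().finalize_xof();
    /// let mut out = [0u8; 100];
    /// reader.fill(&mut out);
    /// reader.set_position(50);
    /// let mut tail = [0u8; 50];
    /// reader.fill(&mut tail);
    /// assert_eq!(out[50..], tail);
    /// ```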
    pub fn set_position(&mut self, position: u64) {
        self.position_within_block = (position % BLOCK_LEN as u64) as u8;
        self.inner.counter = position / BLOCK_LEN as u64;
    }
}

// Don't derive(Debug), because the state may be secret.
impl fmt::Debug for OutputReader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("OutputReader")
            .field("position", &self.position())
            .finish()
    }
}

#[cfg(feature = "std")]
impl std::io::Read for OutputReader {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.fill(buf);
        Ok(buf.len())
    }
}

#[cfg(feature = "std")]
impl std::io::Seek for OutputReader {
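    /// Note that `SeekFrom::End` is not supported, because the output stream
    /// has no defined end; such seeks return `ErrorKind::InvalidInput`.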
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
        let max_position = u64::MAX as i128;
        let target_position: i128 = match pos {
            std::io::SeekFrom::Start(x) => x as i128,
            std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
            std::io::SeekFrom::End(_) => {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidInput,
                    "seek from end not supported",
                ));
            }
        };
        if target_position < 0 {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "seek before start",
            ));
        }
        self.set_position(cmp::min(target_position, max_position) as u64);
        Ok(self.position())
    }
}

#[cfg(feature = "zeroize")]
impl Zeroize for OutputReader {
    fn zeroize(&mut self) {
        // Destructuring to trigger compile error as a reminder to update this impl.
        let Self {
            inner,
            position_within_block,
        } = self;

        inner.zeroize();
        position_within_block.zeroize();
    }
}