//! Fixed size buffer for block processing of data.
#![no_std]
#![doc(
    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
)]
#![warn(missing_docs, rust_2018_idioms)]

pub use generic_array;

use core::{fmt, marker::PhantomData, slice};
use generic_array::{
    typenum::{IsLess, Le, NonZero, U256},
    ArrayLength, GenericArray,
};

mod sealed;

/// Block on which `BlockBuffer` operates.
pub type Block<BlockSize> = GenericArray<u8, BlockSize>;

/// Trait for buffer kinds.
pub trait BufferKind: sealed::Sealed {}

/// Eager block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..BlockSize`.
#[derive(Copy, Clone, Debug, Default)]
pub struct Eager {}

/// Lazy block buffer kind, which guarantees that buffer position
/// always lies in the range of `0..=BlockSize`.
#[derive(Copy, Clone, Debug, Default)]
pub struct Lazy {}

impl BufferKind for Eager {}
impl BufferKind for Lazy {}

/// Eager block buffer.
pub type EagerBuffer<B> = BlockBuffer<B, Eager>;
/// Lazy block buffer.
pub type LazyBuffer<B> = BlockBuffer<B, Lazy>;

/// Block buffer error.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct Error;

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.write_str("Block buffer error")
    }
}

/// Buffer for block processing of data.
#[derive(Debug)]
pub struct BlockBuffer<BlockSize, Kind>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
    Kind: BufferKind,
{
    buffer: Block<BlockSize>,
    pos: u8,
    _pd: PhantomData<Kind>,
}

impl<BlockSize, Kind> Default for BlockBuffer<BlockSize, Kind>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
    Kind: BufferKind,
{
    fn default() -> Self {
        if BlockSize::USIZE == 0 {
            panic!("Block size can not be equal to zero");
        }
        Self {
            buffer: Default::default(),
            pos: 0,
            _pd: PhantomData,
        }
    }
}

impl<BlockSize, Kind> Clone for BlockBuffer<BlockSize, Kind>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
    Kind: BufferKind,
{
    fn clone(&self) -> Self {
        Self {
            buffer: self.buffer.clone(),
            pos: self.pos,
            _pd: PhantomData,
        }
    }
}

impl<BlockSize, Kind> BlockBuffer<BlockSize, Kind>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
    Kind: BufferKind,
{
    /// Create a new buffer from a slice.
    ///
    /// # Panics
    /// If the slice length is not valid for the used buffer kind.
    #[inline(always)]
    pub fn new(buf: &[u8]) -> Self {
        Self::try_new(buf).unwrap()
    }

    /// Create a new buffer from a slice.
    ///
    /// Returns an error if the slice length is not valid for the used buffer kind.
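    ///
    /// # Examples
    ///
    /// A minimal sketch of the kind-specific length rules, assuming this
    /// crate is used under the name `block_buffer`; the 4-byte block size
    /// `U4` is an arbitrary choice for illustration:
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, LazyBuffer, generic_array::typenum::U4};
    ///
    /// // For the `Eager` kind the stored data must be strictly
    /// // shorter than the block.
    /// assert!(EagerBuffer::<U4>::try_new(&[0u8; 3]).is_ok());
    /// assert!(EagerBuffer::<U4>::try_new(&[0u8; 4]).is_err());
    /// // The `Lazy` kind also accepts a full block.
    /// assert!(LazyBuffer::<U4>::try_new(&[0u8; 4]).is_ok());
    /// ```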
    #[inline(always)]
    pub fn try_new(buf: &[u8]) -> Result<Self, Error> {
        if BlockSize::USIZE == 0 {
            panic!("Block size can not be equal to zero");
        }
        let pos = buf.len();
        if !Kind::invariant(pos, BlockSize::USIZE) {
            return Err(Error);
        }
        let mut buffer = Block::<BlockSize>::default();
        buffer[..pos].copy_from_slice(buf);
        Ok(Self {
            buffer,
            pos: pos as u8,
            _pd: PhantomData,
        })
    }

    /// Digest data in `input` in blocks of size `BlockSize` using
    /// the `compress` function, which accepts a slice of blocks.
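    ///
    /// # Examples
    ///
    /// A minimal sketch with an arbitrary 4-byte block size: complete blocks
    /// are passed to `compress`, while the tail is buffered for the next call:
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, generic_array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// let mut n_blocks = 0;
    /// buf.digest_blocks(b"abcde", |blocks| n_blocks += blocks.len());
    /// // One complete block ("abcd") was compressed; "e" stays buffered.
    /// assert_eq!(n_blocks, 1);
    /// assert_eq!(buf.get_data(), b"e");
    /// ```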
    #[inline]
    pub fn digest_blocks(
        &mut self,
        mut input: &[u8],
        mut compress: impl FnMut(&[Block<BlockSize>]),
    ) {
        let pos = self.get_pos();
        // using `self.remaining()` for some reason
        // prevents panic elimination
        let rem = self.size() - pos;
        let n = input.len();
        // Note that checking condition `pos + n < BlockSize` is
        // equivalent to checking `n < rem`, where `rem` is equal
        // to `BlockSize - pos`. Using the latter allows us to work
        // around compiler accounting for possible overflow of
        // `pos + n` which results in it inserting unreachable
        // panic branches. Using `unreachable_unchecked` in `get_pos`
        // we convince the compiler that `BlockSize - pos` never underflows.
        if Kind::invariant(n, rem) {
            // double slicing allows us to remove panic branches
            self.buffer[pos..][..n].copy_from_slice(input);
            self.set_pos_unchecked(pos + n);
            return;
        }
        if pos != 0 {
            let (left, right) = input.split_at(rem);
            input = right;
            self.buffer[pos..].copy_from_slice(left);
            compress(slice::from_ref(&self.buffer));
        }

        let (blocks, leftover) = Kind::split_blocks(input);
        if !blocks.is_empty() {
            compress(blocks);
        }

        let n = leftover.len();
        self.buffer[..n].copy_from_slice(leftover);
        self.set_pos_unchecked(n);
    }

    /// Reset buffer by setting cursor position to zero.
    #[inline(always)]
    pub fn reset(&mut self) {
        self.set_pos_unchecked(0);
    }

    /// Pad remaining data with zeros and return the resulting block.
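    ///
    /// # Examples
    ///
    /// A minimal sketch with an arbitrary 4-byte block size:
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, generic_array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::new(b"ab");
    /// let block = buf.pad_with_zeros();
    /// assert_eq!(block.as_slice(), &[b'a', b'b', 0, 0]);
    /// // The cursor is reset to the start of the buffer.
    /// assert_eq!(buf.get_pos(), 0);
    /// ```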
    #[inline(always)]
    pub fn pad_with_zeros(&mut self) -> &mut Block<BlockSize> {
        let pos = self.get_pos();
        self.buffer[pos..].iter_mut().for_each(|b| *b = 0);
        self.set_pos_unchecked(0);
        &mut self.buffer
    }

    /// Return current cursor position.
    #[inline(always)]
    pub fn get_pos(&self) -> usize {
        let pos = self.pos as usize;
        if !Kind::invariant(pos, BlockSize::USIZE) {
            debug_assert!(false);
            // SAFETY: `pos` never breaks the invariant
            unsafe {
                core::hint::unreachable_unchecked();
            }
        }
        pos
    }

    /// Return slice of data stored inside the buffer.
    #[inline(always)]
    pub fn get_data(&self) -> &[u8] {
        &self.buffer[..self.get_pos()]
    }

    /// Set buffer content and cursor position.
    ///
    /// # Panics
    /// If `pos` is not a valid position for the used buffer kind.
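    ///
    /// # Examples
    ///
    /// A minimal sketch with an arbitrary 4-byte block size; for the `Lazy`
    /// kind the position may equal the block size:
    ///
    /// ```
    /// use block_buffer::{Block, LazyBuffer, generic_array::typenum::U4};
    ///
    /// let mut buf = LazyBuffer::<U4>::default();
    /// buf.set(Block::<U4>::clone_from_slice(b"abcd"), 4);
    /// assert_eq!(buf.get_data(), b"abcd");
    /// ```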
    #[inline]
    pub fn set(&mut self, buf: Block<BlockSize>, pos: usize) {
        assert!(Kind::invariant(pos, BlockSize::USIZE));
        self.buffer = buf;
        self.set_pos_unchecked(pos);
    }

    /// Return size of the internal buffer in bytes.
    #[inline(always)]
    pub fn size(&self) -> usize {
        BlockSize::USIZE
    }

    /// Return number of remaining bytes in the internal buffer.
    #[inline(always)]
    pub fn remaining(&self) -> usize {
        self.size() - self.get_pos()
    }

    #[inline(always)]
    fn set_pos_unchecked(&mut self, pos: usize) {
        debug_assert!(Kind::invariant(pos, BlockSize::USIZE));
        self.pos = pos as u8;
    }
}

impl<BlockSize> BlockBuffer<BlockSize, Eager>
where
    BlockSize: ArrayLength<u8> + IsLess<U256>,
    Le<BlockSize, U256>: NonZero,
{
    /// Fill `data` with generated blocks, using leftover bytes
    /// buffered by a previous call first.
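    ///
    /// # Examples
    ///
    /// A minimal sketch of keystream-like generation with an arbitrary
    /// 4-byte block size, filling each generated block with a block counter:
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, generic_array::typenum::U4};
    ///
    /// let mut buf = EagerBuffer::<U4>::default();
    /// let mut counter = 0u8;
    /// let mut out = [0u8; 6];
    /// buf.set_data(&mut out, |blocks| {
    ///     for block in blocks {
    ///         block.iter_mut().for_each(|b| *b = counter);
    ///         counter += 1;
    ///     }
    /// });
    /// // The two unused bytes of the second block remain buffered.
    /// assert_eq!(out, [0, 0, 0, 0, 1, 1]);
    /// ```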
    #[inline]
    pub fn set_data(
        &mut self,
        mut data: &mut [u8],
        mut process_blocks: impl FnMut(&mut [Block<BlockSize>]),
    ) {
        let pos = self.get_pos();
        let r = self.remaining();
        let n = data.len();
        if pos != 0 {
            if n < r {
                // double slicing allows us to remove panic branches
                data.copy_from_slice(&self.buffer[pos..][..n]);
                self.set_pos_unchecked(pos + n);
                return;
            }
            let (left, right) = data.split_at_mut(r);
            data = right;
            left.copy_from_slice(&self.buffer[pos..]);
        }

        let (blocks, leftover) = to_blocks_mut(data);
        process_blocks(blocks);

        let n = leftover.len();
        if n != 0 {
            let mut block = Default::default();
            process_blocks(slice::from_mut(&mut block));
            leftover.copy_from_slice(&block[..n]);
            self.buffer = block;
        }
        self.set_pos_unchecked(n);
    }

    /// Compress remaining data after padding it with `delim`, zeros and
    /// the `suffix` bytes. If there is not enough unused space, `compress`
    /// will be called twice.
    ///
    /// # Panics
    /// If the suffix length is bigger than the block size.
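    ///
    /// # Examples
    ///
    /// A minimal sketch with an arbitrary 8-byte block size, where the
    /// delimiter and suffix fit into the current block, so `compress`
    /// is called only once:
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, generic_array::typenum::U8};
    ///
    /// let mut buf = EagerBuffer::<U8>::new(b"abcdef");
    /// let mut blocks = Vec::new();
    /// buf.digest_pad(0x80, &[42], |block| blocks.push(block.clone()));
    /// assert_eq!(blocks.len(), 1);
    /// assert_eq!(blocks[0].as_slice(), b"abcdef\x80\x2a");
    /// ```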
    #[inline(always)]
    pub fn digest_pad(
        &mut self,
        delim: u8,
        suffix: &[u8],
        mut compress: impl FnMut(&Block<BlockSize>),
    ) {
        if suffix.len() > BlockSize::USIZE {
            panic!("suffix is too long");
        }
        let pos = self.get_pos();
        self.buffer[pos] = delim;
        for b in &mut self.buffer[pos + 1..] {
            *b = 0;
        }

        let n = self.size() - suffix.len();
        if self.size() - pos - 1 < suffix.len() {
            compress(&self.buffer);
            let mut block = Block::<BlockSize>::default();
            block[n..].copy_from_slice(suffix);
            compress(&block);
        } else {
            self.buffer[n..].copy_from_slice(suffix);
            compress(&self.buffer);
        }
        self.set_pos_unchecked(0)
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// big-endian byte order.
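    ///
    /// # Examples
    ///
    /// A minimal sketch of MD-style length padding with an arbitrary
    /// 16-byte block size; `24` is the bit length of the 3-byte message:
    ///
    /// ```
    /// use block_buffer::{EagerBuffer, generic_array::typenum::U16};
    ///
    /// let mut buf = EagerBuffer::<U16>::new(b"abc");
    /// let mut out = [0u8; 16];
    /// buf.len64_padding_be(24, |block| out.copy_from_slice(block));
    /// assert_eq!(&out[..4], b"abc\x80");
    /// assert!(out[4..8].iter().all(|&b| b == 0));
    /// assert_eq!(&out[8..], &24u64.to_be_bytes());
    /// ```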
    #[inline]
    pub fn len64_padding_be(&mut self, data_len: u64, compress: impl FnMut(&Block<BlockSize>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 64-bit message length using
    /// little-endian byte order.
    #[inline]
    pub fn len64_padding_le(&mut self, data_len: u64, compress: impl FnMut(&Block<BlockSize>)) {
        self.digest_pad(0x80, &data_len.to_le_bytes(), compress);
    }

    /// Pad message with 0x80, zeros and 128-bit message length using
    /// big-endian byte order.
    #[inline]
    pub fn len128_padding_be(&mut self, data_len: u128, compress: impl FnMut(&Block<BlockSize>)) {
        self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
    }
}

/// Split message into a mutable slice of blocks and leftover bytes.
#[inline(always)]
fn to_blocks_mut<N: ArrayLength<u8>>(data: &mut [u8]) -> (&mut [Block<N>], &mut [u8]) {
    let nb = data.len() / N::USIZE;
    let (left, right) = data.split_at_mut(nb * N::USIZE);
    let p = left.as_mut_ptr() as *mut Block<N>;
    // SAFETY: we guarantee that `blocks` does not point outside of `data`
    // and that `p` is valid for mutation
    let blocks = unsafe { slice::from_raw_parts_mut(p, nb) };
    (blocks, right)
}