1//! Hash generator (HASH)
2use core::cmp::min;
3#[cfg(hash_v2)]
4use core::future::poll_fn;
5use core::marker::PhantomData;
6#[cfg(hash_v2)]
7use core::ptr;
8#[cfg(hash_v2)]
9use core::task::Poll;
10
11use embassy_hal_internal::{into_ref, PeripheralRef};
12use embassy_sync::waitqueue::AtomicWaker;
13use stm32_metapac::hash::regs::*;
14
15use crate::dma::NoDma;
16#[cfg(hash_v2)]
17use crate::dma::Transfer;
18use crate::interrupt::typelevel::Interrupt;
19use crate::peripherals::HASH;
20use crate::{interrupt, pac, peripherals, rcc, Peripheral};
21
// Number of context swap (CSR) registers differs between HASH peripheral versions.
#[cfg(hash_v1)]
const NUM_CONTEXT_REGS: usize = 51;
#[cfg(hash_v3)]
const NUM_CONTEXT_REGS: usize = 103;
#[cfg(any(hash_v2, hash_v4))]
const NUM_CONTEXT_REGS: usize = 54;

// Size of the staging buffer kept in `Context` for data that has not yet
// been pushed to the core.
const HASH_BUFFER_LEN: usize = 132;
// Digest block size, in bytes, used to decide when buffered input is flushed.
const DIGEST_BLOCK_SIZE: usize = 128;

// Single waker slot shared by all HASH futures; woken from the interrupt handler.
static HASH_WAKER: AtomicWaker = AtomicWaker::new();
33
/// HASH interrupt handler.
///
/// Bind this to the HASH interrupt; it wakes the driver's pending future
/// when the peripheral signals "data in ready" or "digest complete".
pub struct InterruptHandler<T: Instance> {
    _phantom: PhantomData<T>,
}
38
39impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> {
40 unsafe fn on_interrupt() {
41 let bits: Sr = T::regs().sr().read();
42 if bits.dinis() {
43 T::regs().imr().modify(|reg: &mut Imr| reg.set_dinie(val:false));
44 HASH_WAKER.wake();
45 }
46 if bits.dcis() {
47 T::regs().imr().modify(|reg: &mut Imr| reg.set_dcie(val:false));
48 HASH_WAKER.wake();
49 }
50 }
51}
52
/// Hash algorithm selection.
///
/// On v3/v4 hardware the discriminant is written directly to the ALGO field
/// (see `Hash::start`).
#[derive(Clone, Copy, PartialEq)]
pub enum Algorithm {
    /// SHA-1 Algorithm
    SHA1 = 0,

    #[cfg(any(hash_v1, hash_v2, hash_v4))]
    /// MD5 Algorithm
    MD5 = 1,

    /// SHA-224 Algorithm
    SHA224 = 2,

    /// SHA-256 Algorithm
    SHA256 = 3,

    #[cfg(hash_v3)]
    /// SHA-384 Algorithm
    SHA384 = 12,

    #[cfg(hash_v3)]
    /// SHA-512/224 Algorithm
    SHA512_224 = 13,

    #[cfg(hash_v3)]
    /// SHA-512/256 Algorithm
    SHA512_256 = 14,

    #[cfg(hash_v3)]
    /// SHA-512 Algorithm (doc fix: was mislabeled "SHA-256").
    SHA512 = 15,
}
85
/// Input data width selection.
///
/// Controls how the peripheral swaps each 32-bit word written to DIN
/// (configured via the DATATYPE field in `Hash::start`).
#[repr(u8)]
#[derive(Clone, Copy)]
pub enum DataType {
    /// 32-bit data, no data is swapped.
    Width32 = 0,
    /// 16-bit data, each half-word is swapped.
    Width16 = 1,
    /// 8-bit data, all bytes are swapped.
    Width8 = 2,
    /// 1-bit data, all bits are swapped.
    Width1 = 3,
}
99
/// Stores the state of the HASH peripheral for suspending/resuming
/// digest calculation.
pub struct Context<'c> {
    // Whether the first full `buffer` worth of data has been pushed to the core.
    first_word_sent: bool,
    // Whether the HMAC key (if any) has already been fed to the core.
    key_sent: bool,
    // Staging area for input bytes not yet pushed to the core.
    buffer: [u8; HASH_BUFFER_LEN],
    // Number of valid bytes currently held in `buffer`.
    buflen: usize,
    // Selected hash algorithm.
    algo: Algorithm,
    // Input data width / swap mode.
    format: DataType,
    // Saved IMR (interrupt mask) register value.
    imr: u32,
    // Saved STR (start) register value.
    str: u32,
    // Saved CR (control) register value.
    cr: u32,
    // Saved context swap registers (CSRx).
    csr: [u32; NUM_CONTEXT_REGS],
    // Optional HMAC key; `None` selects plain hash mode.
    key: HmacKey<'c>,
}

/// Optional HMAC key; `None` runs the peripheral in plain hash mode.
type HmacKey<'k> = Option<&'k [u8]>;
117
/// HASH driver.
pub struct Hash<'d, T: Instance, D = NoDma> {
    // Exclusive handle to the HASH peripheral instance.
    _peripheral: PeripheralRef<'d, T>,
    // DMA channel used by the async (hash_v2) code paths; unused otherwise.
    #[allow(dead_code)]
    dma: PeripheralRef<'d, D>,
}
124
125impl<'d, T: Instance, D> Hash<'d, T, D> {
126 /// Instantiates, resets, and enables the HASH peripheral.
127 pub fn new(
128 peripheral: impl Peripheral<P = T> + 'd,
129 dma: impl Peripheral<P = D> + 'd,
130 _irq: impl interrupt::typelevel::Binding<T::Interrupt, InterruptHandler<T>> + 'd,
131 ) -> Self {
132 rcc::enable_and_reset::<HASH>();
133 into_ref!(peripheral, dma);
134 let instance = Self {
135 _peripheral: peripheral,
136 dma: dma,
137 };
138
139 T::Interrupt::unpend();
140 unsafe { T::Interrupt::enable() };
141
142 instance
143 }
144
    /// Starts computation of a new hash and returns the saved peripheral state.
    pub fn start<'c>(&mut self, algorithm: Algorithm, format: DataType, key: HmacKey<'c>) -> Context<'c> {
        // Define a context for this new computation.
        let mut ctx = Context {
            first_word_sent: false,
            key_sent: false,
            buffer: [0; HASH_BUFFER_LEN],
            buflen: 0,
            algo: algorithm,
            format: format,
            imr: 0,
            str: 0,
            cr: 0,
            csr: [0; NUM_CONTEXT_REGS],
            key,
        };

        // Set the data type (word/half-word/byte/bit swapping) in the peripheral.
        T::regs().cr().modify(|w| w.set_datatype(ctx.format as u8));

        // Select the algorithm.
        // v1 has a single boolean ALGO bit: false = SHA-1, true = MD5.
        #[cfg(hash_v1)]
        if ctx.algo == Algorithm::MD5 {
            T::regs().cr().modify(|w| w.set_algo(true));
        }

        #[cfg(hash_v2)]
        {
            // v2 encodes the algorithm across two bits:
            // SHA-1 = 00, MD5 = ALGO0, SHA-224 = ALGO1, SHA-256 = both.
            let mut algo0 = false;
            let mut algo1 = false;
            if ctx.algo == Algorithm::MD5 || ctx.algo == Algorithm::SHA256 {
                algo0 = true;
            }
            if ctx.algo == Algorithm::SHA224 || ctx.algo == Algorithm::SHA256 {
                algo1 = true;
            }
            T::regs().cr().modify(|w| w.set_algo0(algo0));
            T::regs().cr().modify(|w| w.set_algo1(algo1));
        }

        // v3/v4 take the algorithm as a multi-bit field matching the enum discriminants.
        #[cfg(any(hash_v3, hash_v4))]
        T::regs().cr().modify(|w| w.set_algo(ctx.algo as u8));

        // Configure HMAC mode if a key is provided; LKEY selects the
        // long-key (> 64 byte) HMAC variant.
        if let Some(key) = ctx.key {
            T::regs().cr().modify(|w| w.set_mode(true));
            if key.len() > 64 {
                T::regs().cr().modify(|w| w.set_lkey(true));
            }
        }

        // INIT begins a fresh digest with the configuration above.
        T::regs().cr().modify(|w| w.set_init(true));

        // Store and return the state of the peripheral.
        self.store_context(&mut ctx);
        ctx
    }
203
204 /// Restores the peripheral state using the given context,
205 /// then updates the state with the provided data.
206 /// Peripheral state is saved upon return.
207 pub fn update_blocking<'c>(&mut self, ctx: &mut Context<'c>, input: &[u8]) {
208 // Restore the peripheral state.
209 self.load_context(&ctx);
210
211 // Load the HMAC key if provided.
212 if !ctx.key_sent {
213 if let Some(key) = ctx.key {
214 self.accumulate_blocking(key);
215 T::regs().str().write(|w| w.set_dcal(true));
216 // Block waiting for digest.
217 while !T::regs().sr().read().dinis() {}
218 }
219 ctx.key_sent = true;
220 }
221
222 let mut data_waiting = input.len() + ctx.buflen;
223 if data_waiting < DIGEST_BLOCK_SIZE || (data_waiting < ctx.buffer.len() && !ctx.first_word_sent) {
224 // There isn't enough data to digest a block, so append it to the buffer.
225 ctx.buffer[ctx.buflen..ctx.buflen + input.len()].copy_from_slice(input);
226 ctx.buflen += input.len();
227 self.store_context(ctx);
228 return;
229 }
230
231 let mut ilen_remaining = input.len();
232 let mut input_start = 0;
233
234 // Handle first block.
235 if !ctx.first_word_sent {
236 let empty_len = ctx.buffer.len() - ctx.buflen;
237 let copy_len = min(empty_len, ilen_remaining);
238 // Fill the buffer.
239 if copy_len > 0 {
240 ctx.buffer[ctx.buflen..ctx.buflen + copy_len].copy_from_slice(&input[0..copy_len]);
241 ctx.buflen += copy_len;
242 ilen_remaining -= copy_len;
243 input_start += copy_len;
244 }
245 self.accumulate_blocking(ctx.buffer.as_slice());
246 data_waiting -= ctx.buflen;
247 ctx.buflen = 0;
248 ctx.first_word_sent = true;
249 }
250
251 if data_waiting < DIGEST_BLOCK_SIZE {
252 // There isn't enough data remaining to process another block, so store it.
253 ctx.buffer[0..ilen_remaining].copy_from_slice(&input[input_start..input_start + ilen_remaining]);
254 ctx.buflen += ilen_remaining;
255 } else {
256 // First ingest the data in the buffer.
257 let empty_len = DIGEST_BLOCK_SIZE - ctx.buflen;
258 if empty_len > 0 {
259 let copy_len = min(empty_len, ilen_remaining);
260 ctx.buffer[ctx.buflen..ctx.buflen + copy_len]
261 .copy_from_slice(&input[input_start..input_start + copy_len]);
262 ctx.buflen += copy_len;
263 ilen_remaining -= copy_len;
264 input_start += copy_len;
265 }
266 self.accumulate_blocking(&ctx.buffer[0..DIGEST_BLOCK_SIZE]);
267 ctx.buflen = 0;
268
269 // Move any extra data to the now-empty buffer.
270 let leftovers = ilen_remaining % 64;
271 if leftovers > 0 {
272 ctx.buffer[0..leftovers].copy_from_slice(&input[input.len() - leftovers..input.len()]);
273 ctx.buflen += leftovers;
274 ilen_remaining -= leftovers;
275 }
276
277 // Hash the remaining data.
278 self.accumulate_blocking(&input[input_start..input_start + ilen_remaining]);
279 }
280
281 // Save the peripheral context.
282 self.store_context(ctx);
283 }
284
285 /// Restores the peripheral state using the given context,
286 /// then updates the state with the provided data.
287 /// Peripheral state is saved upon return.
288 #[cfg(hash_v2)]
289 pub async fn update<'c>(&mut self, ctx: &mut Context<'c>, input: &[u8])
290 where
291 D: crate::hash::Dma<T>,
292 {
293 // Restore the peripheral state.
294 self.load_context(&ctx);
295
296 // Load the HMAC key if provided.
297 if !ctx.key_sent {
298 if let Some(key) = ctx.key {
299 self.accumulate(key).await;
300 }
301 ctx.key_sent = true;
302 }
303
304 let data_waiting = input.len() + ctx.buflen;
305 if data_waiting < DIGEST_BLOCK_SIZE {
306 // There isn't enough data to digest a block, so append it to the buffer.
307 ctx.buffer[ctx.buflen..ctx.buflen + input.len()].copy_from_slice(input);
308 ctx.buflen += input.len();
309 self.store_context(ctx);
310 return;
311 }
312
313 // Enable multiple DMA transfers.
314 T::regs().cr().modify(|w| w.set_mdmat(true));
315
316 let mut ilen_remaining = input.len();
317 let mut input_start = 0;
318
319 // First ingest the data in the buffer.
320 let empty_len = DIGEST_BLOCK_SIZE - ctx.buflen;
321 if empty_len > 0 {
322 let copy_len = min(empty_len, ilen_remaining);
323 ctx.buffer[ctx.buflen..ctx.buflen + copy_len].copy_from_slice(&input[input_start..input_start + copy_len]);
324 ctx.buflen += copy_len;
325 ilen_remaining -= copy_len;
326 input_start += copy_len;
327 }
328 self.accumulate(&ctx.buffer[..DIGEST_BLOCK_SIZE]).await;
329 ctx.buflen = 0;
330
331 // Move any extra data to the now-empty buffer.
332 let leftovers = ilen_remaining % DIGEST_BLOCK_SIZE;
333 if leftovers > 0 {
334 assert!(ilen_remaining >= leftovers);
335 ctx.buffer[0..leftovers].copy_from_slice(&input[input.len() - leftovers..input.len()]);
336 ctx.buflen += leftovers;
337 ilen_remaining -= leftovers;
338 } else {
339 ctx.buffer
340 .copy_from_slice(&input[input.len() - DIGEST_BLOCK_SIZE..input.len()]);
341 ctx.buflen += DIGEST_BLOCK_SIZE;
342 ilen_remaining -= DIGEST_BLOCK_SIZE;
343 }
344
345 // Hash the remaining data.
346 self.accumulate(&input[input_start..input_start + ilen_remaining]).await;
347
348 // Save the peripheral context.
349 self.store_context(ctx);
350 }
351
    /// Computes a digest for the given context.
    /// The digest buffer must be large enough to accomodate a digest for the selected algorithm.
    /// The largest returned digest size is 128 bytes for SHA-512.
    /// Panics if the supplied digest buffer is too short.
    pub fn finish_blocking<'c>(&mut self, mut ctx: Context<'c>, digest: &mut [u8]) -> usize {
        // Restore the peripheral state.
        self.load_context(&ctx);

        // Hash the leftover bytes, if any.
        self.accumulate_blocking(&ctx.buffer[0..ctx.buflen]);
        ctx.buflen = 0;

        // Start the digest calculation.
        T::regs().str().write(|w| w.set_dcal(true));

        // For HMAC, feed the key once more (outer HMAC stage) and start
        // the final calculation.
        if let Some(key) = ctx.key {
            while !T::regs().sr().read().dinis() {}
            self.accumulate_blocking(key);
            T::regs().str().write(|w| w.set_dcal(true));
        }

        // Block until digest computation is complete.
        while !T::regs().sr().read().dcis() {}

        // Number of 32-bit digest words produced by the selected algorithm.
        let digest_words = match ctx.algo {
            Algorithm::SHA1 => 5,
            #[cfg(any(hash_v1, hash_v2, hash_v4))]
            Algorithm::MD5 => 4,
            Algorithm::SHA224 => 7,
            Algorithm::SHA256 => 8,
            #[cfg(hash_v3)]
            Algorithm::SHA384 => 12,
            #[cfg(hash_v3)]
            Algorithm::SHA512_224 => 7,
            #[cfg(hash_v3)]
            Algorithm::SHA512_256 => 8,
            #[cfg(hash_v3)]
            Algorithm::SHA512 => 16,
        };

        let digest_len_bytes = digest_words * 4;
        // Panics if the supplied digest buffer is too short.
        if digest.len() < digest_len_bytes {
            panic!("Digest buffer must be at least {} bytes long.", digest_words * 4);
        }

        // Copy the digest out of the HR registers, one big-endian word at a time.
        let mut i = 0;
        while i < digest_words {
            let word = T::regs().hr(i).read();
            digest[(i * 4)..((i * 4) + 4)].copy_from_slice(word.to_be_bytes().as_slice());
            i += 1;
        }
        digest_len_bytes
    }
408
    /// Computes a digest for the given context.
    /// The digest buffer must be large enough to accomodate a digest for the selected algorithm.
    /// The largest returned digest size is 128 bytes for SHA-512.
    /// Panics if the supplied digest buffer is too short.
    #[cfg(hash_v2)]
    pub async fn finish<'c>(&mut self, mut ctx: Context<'c>, digest: &mut [u8]) -> usize
    where
        D: crate::hash::Dma<T>,
    {
        // Restore the peripheral state.
        self.load_context(&ctx);

        // Must be cleared prior to the last DMA transfer.
        T::regs().cr().modify(|w| w.set_mdmat(false));

        // Hash the leftover bytes, if any.
        // NOTE(review): unlike `finish_blocking`, DCAL is never written here —
        // presumably the core starts the final digest automatically at the end
        // of the DMA transfer when MDMAT is clear; verify against the RM.
        self.accumulate(&ctx.buffer[0..ctx.buflen]).await;
        ctx.buflen = 0;

        // Feed the HMAC key once more if provided (outer HMAC stage).
        if let Some(key) = ctx.key {
            self.accumulate(key).await;
        }

        // Wait for completion.
        poll_fn(|cx| {
            // Check if already done.
            let bits = T::regs().sr().read();
            if bits.dcis() {
                return Poll::Ready(());
            }
            // Register waker, then enable interrupts.
            HASH_WAKER.register(cx.waker());
            T::regs().imr().modify(|reg| reg.set_dcie(true));
            // Re-check after enabling the interrupt to close the race where
            // completion lands between the first read and the enable.
            let bits = T::regs().sr().read();
            if bits.dcis() {
                Poll::Ready(())
            } else {
                Poll::Pending
            }
        })
        .await;

        // Number of 32-bit digest words produced by the selected algorithm.
        let digest_words = match ctx.algo {
            Algorithm::SHA1 => 5,
            #[cfg(any(hash_v1, hash_v2, hash_v4))]
            Algorithm::MD5 => 4,
            Algorithm::SHA224 => 7,
            Algorithm::SHA256 => 8,
            #[cfg(hash_v3)]
            Algorithm::SHA384 => 12,
            #[cfg(hash_v3)]
            Algorithm::SHA512_224 => 7,
            #[cfg(hash_v3)]
            Algorithm::SHA512_256 => 8,
            #[cfg(hash_v3)]
            Algorithm::SHA512 => 16,
        };

        let digest_len_bytes = digest_words * 4;
        // Panics if the supplied digest buffer is too short.
        if digest.len() < digest_len_bytes {
            panic!("Digest buffer must be at least {} bytes long.", digest_words * 4);
        }

        // Copy the digest out of the HR registers, one big-endian word at a time.
        let mut i = 0;
        while i < digest_words {
            let word = T::regs().hr(i).read();
            digest[(i * 4)..((i * 4) + 4)].copy_from_slice(word.to_be_bytes().as_slice());
            i += 1;
        }
        digest_len_bytes
    }
484
485 /// Push data into the hash core.
486 fn accumulate_blocking(&mut self, input: &[u8]) {
487 // Set the number of valid bits.
488 let num_valid_bits: u8 = (8 * (input.len() % 4)) as u8;
489 T::regs().str().modify(|w| w.set_nblw(num_valid_bits));
490
491 let mut i = 0;
492 while i < input.len() {
493 let mut word: [u8; 4] = [0; 4];
494 let copy_idx = min(i + 4, input.len());
495 word[0..copy_idx - i].copy_from_slice(&input[i..copy_idx]);
496 T::regs().din().write_value(u32::from_ne_bytes(word));
497 i += 4;
498 }
499 }
500
    /// Push data into the hash core via DMA.
    #[cfg(hash_v2)]
    async fn accumulate(&mut self, input: &[u8])
    where
        D: crate::hash::Dma<T>,
    {
        // Ignore an input length of 0.
        if input.len() == 0 {
            return;
        }

        // Set the number of valid bits in the final (possibly partial) word.
        let num_valid_bits: u8 = (8 * (input.len() % 4)) as u8;
        T::regs().str().modify(|w| w.set_nblw(num_valid_bits));

        // Configure DMA to transfer input to hash core.
        let dma_request = self.dma.request();
        let dst_ptr = T::regs().din().as_ptr();
        // Round the transfer length up to whole 32-bit words; the extra bits
        // are masked off by NBLW above.
        let mut num_words = input.len() / 4;
        if input.len() % 4 > 0 {
            num_words += 1;
        }
        // NOTE(review): when `input.len() % 4 != 0` this raw slice covers up to
        // 3 bytes past the end of `input`, which the DMA will read — verify
        // this over-read is acceptable/sound for all callers.
        let src_ptr = ptr::slice_from_raw_parts(input.as_ptr().cast(), num_words);
        let dma_transfer =
            unsafe { Transfer::new_write_raw(&mut self.dma, dma_request, src_ptr, dst_ptr, Default::default()) };
        // Enable DMA requests from the hash core.
        T::regs().cr().modify(|w| w.set_dmae(true));

        // Wait for the transfer to complete.
        dma_transfer.await;
    }
531
532 /// Save the peripheral state to a context.
533 fn store_context<'c>(&mut self, ctx: &mut Context<'c>) {
534 // Block waiting for data in ready.
535 while !T::regs().sr().read().dinis() {}
536
537 // Store peripheral context.
538 ctx.imr = T::regs().imr().read().0;
539 ctx.str = T::regs().str().read().0;
540 ctx.cr = T::regs().cr().read().0;
541 let mut i = 0;
542 while i < NUM_CONTEXT_REGS {
543 ctx.csr[i] = T::regs().csr(i).read();
544 i += 1;
545 }
546 }
547
548 /// Restore the peripheral state from a context.
549 fn load_context(&mut self, ctx: &Context) {
550 // Restore the peripheral state from the context.
551 T::regs().imr().write_value(Imr { 0: ctx.imr });
552 T::regs().str().write_value(Str { 0: ctx.str });
553 T::regs().cr().write_value(Cr { 0: ctx.cr });
554 T::regs().cr().modify(|w| w.set_init(true));
555 let mut i = 0;
556 while i < NUM_CONTEXT_REGS {
557 T::regs().csr(i).write_value(ctx.csr[i]);
558 i += 1;
559 }
560 }
561}
562
// Private supertrait sealing `Instance`: only this module's
// `foreach_interrupt!` expansion can provide the register block.
trait SealedInstance {
    fn regs() -> pac::hash::Hash;
}
566
/// HASH instance trait.
///
/// Sealed via `SealedInstance`; implemented below for each chip's HASH
/// peripheral by `foreach_interrupt!`.
#[allow(private_bounds)]
pub trait Instance: SealedInstance + Peripheral<P = Self> + crate::rcc::RccPeripheral + 'static + Send {
    /// Interrupt for this HASH instance.
    type Interrupt: interrupt::typelevel::Interrupt;
}
573
// Implement `Instance`/`SealedInstance` for every chip peripheral with a
// global HASH interrupt, binding its interrupt type and register block.
foreach_interrupt!(
    ($inst:ident, hash, HASH, GLOBAL, $irq:ident) => {
        impl Instance for peripherals::$inst {
            type Interrupt = crate::interrupt::typelevel::$irq;
        }

        impl SealedInstance for peripherals::$inst {
            fn regs() -> crate::pac::hash::Hash {
                crate::pac::$inst
            }
        }
    };
);

// Marker trait for DMA channels usable with a HASH `Instance`.
dma_trait!(Dma, Instance);
589