1 | #![no_std ] |
2 | #![cfg_attr (docsrs, feature(doc_cfg))] |
3 | #![doc ( |
4 | html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg" , |
5 | html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg" |
6 | )] |
7 | #![warn (missing_docs, rust_2018_idioms, unused_qualifications)] |
8 | |
9 | //! Securely zero memory with a simple trait ([`Zeroize`]) built on stable Rust |
10 | //! primitives which guarantee the operation will not be "optimized away". |
11 | //! |
12 | //! ## About |
13 | //! |
14 | //! [Zeroing memory securely is hard] - compilers optimize for performance, and |
15 | //! in doing so they love to "optimize away" unnecessary zeroing calls. There are |
16 | //! many documented "tricks" to attempt to avoid these optimizations and ensure |
17 | //! that a zeroing routine is performed reliably. |
18 | //! |
19 | //! This crate isn't about tricks: it uses [`core::ptr::write_volatile`] |
20 | //! and [`core::sync::atomic`] memory fences to provide easy-to-use, portable |
21 | //! zeroing behavior which works on all of Rust's core number types and slices |
22 | //! thereof, implemented in pure Rust with no usage of FFI or assembly. |
23 | //! |
24 | //! - No insecure fallbacks! |
25 | //! - No dependencies! |
26 | //! - No FFI or inline assembly! **WASM friendly** (and tested)! |
27 | //! - `#![no_std]` i.e. **embedded-friendly**! |
28 | //! - No functionality besides securely zeroing memory! |
29 | //! - (Optional) Custom derive support for zeroing complex structures |
30 | //! |
31 | //! ## Minimum Supported Rust Version |
32 | //! |
33 | //! Requires Rust **1.60** or newer. |
34 | //! |
35 | //! In the future, we reserve the right to change MSRV (i.e. MSRV is out-of-scope |
36 | //! for this crate's SemVer guarantees), however when we do it will be accompanied |
37 | //! by a minor version bump. |
38 | //! |
39 | //! ## Usage |
40 | //! |
41 | //! ``` |
42 | //! use zeroize::Zeroize; |
43 | //! |
44 | //! fn main() { |
45 | //! // Protip: don't embed secrets in your source code. |
46 | //! // This is just an example. |
47 | //! let mut secret = b"Air shield password: 1,2,3,4,5" .to_vec(); |
48 | //! // [ ... ] open the air shield here |
49 | //! |
50 | //! // Now that we're done using the secret, zero it out. |
51 | //! secret.zeroize(); |
52 | //! } |
53 | //! ``` |
54 | //! |
55 | //! The [`Zeroize`] trait is impl'd on all of Rust's core scalar types including |
56 | //! integers, floats, `bool`, and `char`. |
57 | //! |
58 | //! Additionally, it's implemented on slices and `IterMut`s of the above types. |
59 | //! |
60 | //! When the `alloc` feature is enabled (which it is by default), it's also |
61 | //! impl'd for `Vec<T>` for the above types as well as `String`, where it provides |
62 | //! [`Vec::clear`] / [`String::clear`]-like behavior (truncating to zero-length) |
63 | //! but ensures the backing memory is securely zeroed with some caveats. |
64 | //! |
65 | //! With the `std` feature enabled (which it is **not** by default), [`Zeroize`] |
66 | //! is also implemented for [`CString`]. After calling `zeroize()` on a `CString`, |
67 | //! its internal buffer will contain exactly one nul byte. The backing |
68 | //! memory is zeroed by converting it to a `Vec<u8>` and back into a `CString`. |
69 | //! (NOTE: see "Stack/Heap Zeroing Notes" for important `Vec`/`String`/`CString` details) |
70 | //! |
71 | //! |
72 | //! The [`DefaultIsZeroes`] marker trait can be impl'd on types which also |
73 | //! impl [`Default`], which implements [`Zeroize`] by overwriting a value with |
74 | //! the default value. |
75 | //! |
76 | //! ## Custom Derive Support |
77 | //! |
78 | //! This crate has custom derive support for the `Zeroize` trait, |
79 | //! gated under the `zeroize` crate's `zeroize_derive` Cargo feature, |
80 | //! which automatically calls `zeroize()` on all members of a struct |
81 | //! or tuple struct. |
82 | //! |
83 | //! Attributes supported for `Zeroize`: |
84 | //! |
85 | //! On the item level: |
86 | //! - `#[zeroize(drop)]`: *deprecated* use `ZeroizeOnDrop` instead |
87 | //! - `#[zeroize(bound = "T: MyTrait")]`: this replaces any trait bounds |
88 | //! inferred by zeroize |
89 | //! |
90 | //! On the field level: |
91 | //! - `#[zeroize(skip)]`: skips this field or variant when calling `zeroize()` |
92 | //! |
93 | //! Attributes supported for `ZeroizeOnDrop`: |
94 | //! |
95 | //! On the field level: |
96 | //! - `#[zeroize(skip)]`: skips this field or variant when calling `zeroize()` |
97 | //! |
98 | //! Example which derives `Drop`: |
99 | //! |
100 | //! ``` |
101 | //! # #[cfg (feature = "zeroize_derive" )] |
102 | //! # { |
103 | //! use zeroize::{Zeroize, ZeroizeOnDrop}; |
104 | //! |
105 | //! // This struct will be zeroized on drop |
106 | //! #[derive(Zeroize, ZeroizeOnDrop)] |
107 | //! struct MyStruct([u8; 32]); |
108 | //! # } |
109 | //! ``` |
110 | //! |
111 | //! Example which does not derive `Drop` (useful for e.g. `Copy` types) |
112 | //! |
113 | //! ``` |
//! # #[cfg(feature = "zeroize_derive")]
115 | //! # { |
116 | //! use zeroize::Zeroize; |
117 | //! |
118 | //! // This struct will *NOT* be zeroized on drop |
119 | //! #[derive(Copy, Clone, Zeroize)] |
120 | //! struct MyStruct([u8; 32]); |
121 | //! # } |
122 | //! ``` |
123 | //! |
124 | //! Example which only derives `Drop`: |
125 | //! |
126 | //! ``` |
127 | //! # #[cfg (feature = "zeroize_derive" )] |
128 | //! # { |
129 | //! use zeroize::ZeroizeOnDrop; |
130 | //! |
131 | //! // This struct will be zeroized on drop |
132 | //! #[derive(ZeroizeOnDrop)] |
133 | //! struct MyStruct([u8; 32]); |
134 | //! # } |
135 | //! ``` |
136 | //! |
137 | //! ## `Zeroizing<Z>`: wrapper for zeroizing arbitrary values on drop |
138 | //! |
139 | //! `Zeroizing<Z: Zeroize>` is a generic wrapper type that impls `Deref` |
140 | //! and `DerefMut`, allowing access to an inner value of type `Z`, and also |
141 | //! impls a `Drop` handler which calls `zeroize()` on its contents: |
142 | //! |
143 | //! ``` |
144 | //! use zeroize::Zeroizing; |
145 | //! |
146 | //! fn main() { |
147 | //! let mut secret = Zeroizing::new([0u8; 5]); |
148 | //! |
149 | //! // Set the air shield password |
150 | //! // Protip (again): don't embed secrets in your source code. |
151 | //! secret.copy_from_slice(&[1, 2, 3, 4, 5]); |
152 | //! assert_eq!(secret.as_ref(), &[1, 2, 3, 4, 5]); |
153 | //! |
154 | //! // The contents of `secret` will be automatically zeroized on drop |
155 | //! } |
156 | //! ``` |
157 | //! |
158 | //! ## What guarantees does this crate provide? |
159 | //! |
160 | //! This crate guarantees the following: |
161 | //! |
162 | //! 1. The zeroing operation can't be "optimized away" by the compiler. |
163 | //! 2. All subsequent reads to memory will see "zeroized" values. |
164 | //! |
165 | //! LLVM's volatile semantics ensure #1 is true. |
166 | //! |
167 | //! Additionally, thanks to work by the [Unsafe Code Guidelines Working Group], |
168 | //! we can now fairly confidently say #2 is true as well. Previously there were |
169 | //! worries that the approach used by this crate (mixing volatile and |
170 | //! non-volatile accesses) was undefined behavior due to language contained |
171 | //! in the documentation for `write_volatile`, however after some discussion |
172 | //! [these remarks have been removed] and the specific usage pattern in this |
173 | //! crate is considered to be well-defined. |
174 | //! |
175 | //! Additionally this crate leverages [`core::sync::atomic::compiler_fence`] |
176 | //! with the strictest ordering |
177 | //! ([`Ordering::SeqCst`]) as a |
178 | //! precaution to help ensure reads are not reordered before memory has been |
179 | //! zeroed. |
180 | //! |
181 | //! All of that said, there is still potential for microarchitectural attacks |
182 | //! (ala Spectre/Meltdown) to leak "zeroized" secrets through covert channels. |
183 | //! This crate makes no guarantees that zeroized values cannot be leaked |
184 | //! through such channels, as they represent flaws in the underlying hardware. |
185 | //! |
186 | //! ## Stack/Heap Zeroing Notes |
187 | //! |
188 | //! This crate can be used to zero values from either the stack or the heap. |
189 | //! |
190 | //! However, be aware several operations in Rust can unintentionally leave |
191 | //! copies of data in memory. This includes but is not limited to: |
192 | //! |
193 | //! - Moves and [`Copy`] |
194 | //! - Heap reallocation when using [`Vec`] and [`String`] |
195 | //! - Borrowers of a reference making copies of the data |
196 | //! |
197 | //! [`Pin`][`core::pin::Pin`] can be leveraged in conjunction with this crate |
198 | //! to ensure data kept on the stack isn't moved. |
199 | //! |
200 | //! The `Zeroize` impls for `Vec`, `String` and `CString` zeroize the entire |
201 | //! capacity of their backing buffer, but cannot guarantee copies of the data |
202 | //! were not previously made by buffer reallocation. It's therefore important |
203 | //! when attempting to zeroize such buffers to initialize them to the correct |
204 | //! capacity, and take care to prevent subsequent reallocation. |
205 | //! |
206 | //! The `secrecy` crate provides higher-level abstractions for eliminating |
207 | //! usage patterns which can cause reallocations: |
208 | //! |
209 | //! <https://crates.io/crates/secrecy> |
210 | //! |
211 | //! ## What about: clearing registers, mlock, mprotect, etc? |
212 | //! |
213 | //! This crate is focused on providing simple, unobtrusive support for reliably |
214 | //! zeroing memory using the best approach possible on stable Rust. |
215 | //! |
216 | //! Clearing registers is a difficult problem that can't easily be solved by |
217 | //! something like a crate, and requires either inline ASM or rustc support. |
218 | //! See <https://github.com/rust-lang/rust/issues/17046> for background on |
219 | //! this particular problem. |
220 | //! |
221 | //! Other memory protection mechanisms are interesting and useful, but often |
222 | //! overkill (e.g. defending against RAM scraping or attackers with swap access). |
223 | //! In as much as there may be merit to these approaches, there are also many |
224 | //! other crates that already implement more sophisticated memory protections. |
225 | //! Such protections are explicitly out-of-scope for this crate. |
226 | //! |
227 | //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote |
228 | //! it in the most unobtrusive manner possible. This includes omitting complex |
229 | //! `unsafe` memory protection systems and just trying to make the best memory |
230 | //! zeroing crate available. |
231 | //! |
232 | //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html |
233 | //! [Unsafe Code Guidelines Working Group]: https://github.com/rust-lang/unsafe-code-guidelines |
234 | //! [these remarks have been removed]: https://github.com/rust-lang/rust/pull/60972 |
235 | //! [good cryptographic hygiene]: https://github.com/veorq/cryptocoding#clean-memory-of-secret-data |
236 | //! [`Ordering::SeqCst`]: core::sync::atomic::Ordering::SeqCst |
237 | |
238 | #[cfg (feature = "alloc" )] |
239 | extern crate alloc; |
240 | |
241 | #[cfg (feature = "std" )] |
242 | extern crate std; |
243 | |
244 | #[cfg (feature = "zeroize_derive" )] |
245 | #[cfg_attr (docsrs, doc(cfg(feature = "zeroize_derive" )))] |
246 | pub use zeroize_derive::{Zeroize, ZeroizeOnDrop}; |
247 | |
248 | #[cfg (all(feature = "aarch64" , target_arch = "aarch64" ))] |
249 | mod aarch64; |
250 | #[cfg (any(target_arch = "x86" , target_arch = "x86_64" ))] |
251 | mod x86; |
252 | |
253 | use core::{ |
254 | marker::{PhantomData, PhantomPinned}, |
255 | mem::{self, MaybeUninit}, |
256 | num::{ |
257 | self, NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, |
258 | NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, |
259 | }, |
260 | ops, ptr, |
261 | slice::IterMut, |
262 | sync::atomic, |
263 | }; |
264 | |
265 | #[cfg (feature = "alloc" )] |
266 | use alloc::{boxed::Box, string::String, vec::Vec}; |
267 | |
268 | #[cfg (feature = "std" )] |
269 | use std::ffi::CString; |
270 | |
/// Trait for securely erasing values from memory.
pub trait Zeroize {
    /// Zero out this object from memory using Rust intrinsics which ensure the
    /// zeroization operation is not "optimized away" by the compiler.
    fn zeroize(&mut self);
}

/// Marker trait signifying that this type will [`Zeroize::zeroize`] itself on [`Drop`].
///
/// This trait carries no methods: it is a promise made by the implementor
/// (e.g. via `#[derive(ZeroizeOnDrop)]`) that its `Drop` impl zeroizes.
pub trait ZeroizeOnDrop {}

/// Marker trait for types whose [`Default`] is the desired zeroization result
///
/// Types implementing this receive [`Zeroize`] via the blanket impl below,
/// which overwrites the value with `Default::default()` using a volatile write.
pub trait DefaultIsZeroes: Copy + Default + Sized {}

/// Fallible trait for representing cases where zeroization may or may not be
/// possible.
///
/// This is primarily useful for scenarios like reference counted data, where
/// zeroization is only possible when the last reference is dropped.
pub trait TryZeroize {
    /// Try to zero out this object from memory using Rust intrinsics which
    /// ensure the zeroization operation is not "optimized away" by the
    /// compiler.
    ///
    /// Returns `true` if zeroization succeeded; callers must check the result.
    #[must_use ]
    fn try_zeroize(&mut self) -> bool;
}
296 | |
impl<Z> Zeroize for Z
where
    Z: DefaultIsZeroes,
{
    fn zeroize(&mut self) {
        // Volatile write prevents the compiler from eliding the store; the
        // fence afterwards keeps subsequent reads from being reordered
        // before the memory is overwritten.
        volatile_write(self, Z::default());
        atomic_fence();
    }
}

// Implements `DefaultIsZeroes` for each listed type, whose `Default` value
// is an all-zero bit pattern.
macro_rules! impl_zeroize_with_default {
    ($($type:ty),+) => {
        $(impl DefaultIsZeroes for $type {})+
    };
}

#[rustfmt::skip]
impl_zeroize_with_default! {
    PhantomPinned, (), bool, char,
    f32, f64,
    i8, i16, i32, i64, i128, isize,
    u8, u16, u32, u64, u128, usize
}

/// `PhantomPinned` is zero sized so provide a ZeroizeOnDrop implementation.
impl ZeroizeOnDrop for PhantomPinned {}

/// `()` is zero sized so provide a ZeroizeOnDrop implementation.
impl ZeroizeOnDrop for () {}
326 | |
// Implements `Zeroize` for the `NonZero*` integer types. These types cannot
// hold the value zero, so "zeroizing" writes the value 1 (the closest legal
// sentinel) with a volatile write instead.
macro_rules! impl_zeroize_for_non_zero {
    ($($type:ty),+) => {
        $(
            impl Zeroize for $type {
                fn zeroize(&mut self) {
                    // Evaluated at compile time; the `unreachable!()` arm can
                    // never be taken because 1 is always a valid non-zero value.
                    const ONE: $type = match <$type>::new(1) {
                        Some(one) => one,
                        None => unreachable!(),
                    };
                    volatile_write(self, ONE);
                    atomic_fence();
                }
            }
        )+
    };
}

impl_zeroize_for_non_zero!(
    NonZeroI8,
    NonZeroI16,
    NonZeroI32,
    NonZeroI64,
    NonZeroI128,
    NonZeroIsize,
    NonZeroU8,
    NonZeroU16,
    NonZeroU32,
    NonZeroU64,
    NonZeroU128,
    NonZeroUsize
);
358 | |
/// Impl [`Zeroize`] on [`num::Wrapping`] by delegating to the inner value.
impl<Z> Zeroize for num::Wrapping<Z>
where
    Z: Zeroize,
{
    fn zeroize(&mut self) {
        self.0.zeroize();
    }
}

/// Impl [`Zeroize`] on arrays of types that impl [`Zeroize`].
impl<Z, const N: usize> Zeroize for [Z; N]
where
    Z: Zeroize,
{
    fn zeroize(&mut self) {
        // Delegates to the `IterMut` impl, zeroizing each element in turn.
        self.iter_mut().zeroize();
    }
}

/// Impl [`ZeroizeOnDrop`] on arrays of types that impl [`ZeroizeOnDrop`].
impl<Z, const N: usize> ZeroizeOnDrop for [Z; N] where Z: ZeroizeOnDrop {}
380 | |
381 | impl<Z> Zeroize for IterMut<'_, Z> |
382 | where |
383 | Z: Zeroize, |
384 | { |
385 | fn zeroize(&mut self) { |
386 | for elem: &mut Z in self { |
387 | elem.zeroize(); |
388 | } |
389 | } |
390 | } |
391 | |
impl<Z> Zeroize for Option<Z>
where
    Z: Zeroize,
{
    fn zeroize(&mut self) {
        if let Some(value) = self {
            value.zeroize();

            // Ensures self is None and that the value was dropped. Without the take, the drop
            // of the (zeroized) value isn't called, which might lead to a leak or other
            // unexpected behavior. For example, if this were Option<Vec<T>>, the above call to
            // zeroize would not free the allocated memory, but the `take` call will.
            self.take();
        }

        // Ensure that if the `Option` were previously `Some` but a value was copied/moved out
        // that the remaining space in the `Option` is zeroized.
        //
        // Safety:
        //
        // The memory pointed to by `self` is valid for `mem::size_of::<Self>()` bytes.
        // It is also properly aligned, because `u8` has an alignment of `1`.
        unsafe {
            volatile_set((self as *mut Self).cast::<u8>(), 0, mem::size_of::<Self>());
        }

        // Ensures self is overwritten with the `None` bit pattern. volatile_write can't be
        // used because Option<Z> is not copy.
        //
        // Safety:
        //
        // self is safe to replace with `None`, which the take() call above should have
        // already done semantically. Any value which needed to be dropped will have been
        // done so by take().
        unsafe { ptr::write_volatile(self, None) }

        atomic_fence();
    }
}

/// Impl [`ZeroizeOnDrop`] for `Option<Z>` when the inner type zeroizes on drop.
impl<Z> ZeroizeOnDrop for Option<Z> where Z: ZeroizeOnDrop {}
433 | |
434 | /// Impl [`Zeroize`] on [`MaybeUninit`] types. |
435 | /// |
436 | /// This fills the memory with zeroes. |
437 | /// Note that this ignore invariants that `Z` might have, because |
438 | /// [`MaybeUninit`] removes all invariants. |
439 | impl<Z> Zeroize for MaybeUninit<Z> { |
440 | fn zeroize(&mut self) { |
441 | // Safety: |
442 | // `MaybeUninit` is valid for any byte pattern, including zeros. |
443 | unsafe { ptr::write_volatile(self, src:MaybeUninit::zeroed()) } |
444 | atomic_fence(); |
445 | } |
446 | } |
447 | |
448 | /// Impl [`Zeroize`] on slices of [`MaybeUninit`] types. |
449 | /// |
450 | /// This impl can eventually be optimized using an memset intrinsic, |
451 | /// such as [`core::intrinsics::volatile_set_memory`]. |
452 | /// |
453 | /// This fills the slice with zeroes. |
454 | /// |
455 | /// Note that this ignore invariants that `Z` might have, because |
456 | /// [`MaybeUninit`] removes all invariants. |
457 | impl<Z> Zeroize for [MaybeUninit<Z>] { |
458 | fn zeroize(&mut self) { |
459 | let ptr: *mut MaybeUninit = self.as_mut_ptr().cast::<MaybeUninit<u8>>(); |
460 | let size: usize = self.len().checked_mul(mem::size_of::<Z>()).unwrap(); |
461 | assert!(size <= isize::MAX as usize); |
462 | |
463 | // Safety: |
464 | // |
465 | // This is safe, because every valid pointer is well aligned for u8 |
466 | // and it is backed by a single allocated object for at least `self.len() * size_pf::<Z>()` bytes. |
467 | // and 0 is a valid value for `MaybeUninit<Z>` |
468 | // The memory of the slice should not wrap around the address space. |
469 | unsafe { volatile_set(dst:ptr, src:MaybeUninit::zeroed(), count:size) } |
470 | atomic_fence(); |
471 | } |
472 | } |
473 | |
/// Impl [`Zeroize`] on slices of types that can be zeroized with [`Default`].
///
/// This impl can eventually be optimized using an memset intrinsic,
/// such as [`core::intrinsics::volatile_set_memory`]. For that reason the
/// blanket impl on slices is bounded by [`DefaultIsZeroes`].
///
/// To zeroize a mut slice of `Z: Zeroize` which does not impl
/// [`DefaultIsZeroes`], call `iter_mut().zeroize()`.
impl<Z> Zeroize for [Z]
where
    Z: DefaultIsZeroes,
{
    fn zeroize(&mut self) {
        // Uphold volatile_set's precondition that `count <= isize::MAX`.
        assert!(self.len() <= isize::MAX as usize);

        // Safety:
        //
        // This is safe, because the slice is well aligned and is backed by a single allocated
        // object for at least `self.len()` elements of type `Z`.
        // `self.len()` is also not larger than an `isize`, because of the assertion above.
        // The memory of the slice should not wrap around the address space.
        unsafe { volatile_set(self.as_mut_ptr(), Z::default(), self.len()) };
        atomic_fence();
    }
}

impl Zeroize for str {
    /// Zeroizes the string's bytes in place via the `[u8]` slice impl.
    fn zeroize(&mut self) {
        // Safety:
        // A zeroized byte slice is a valid UTF-8 string.
        unsafe { self.as_bytes_mut().zeroize() }
    }
}
507 | |
/// [`PhantomData`] is always zero sized so provide a [`Zeroize`] implementation.
impl<Z> Zeroize for PhantomData<Z> {
    // Nothing to erase: `PhantomData` carries no data.
    fn zeroize(&mut self) {}
}

/// [`PhantomData`] is always zero sized so provide a ZeroizeOnDrop implementation.
impl<Z> ZeroizeOnDrop for PhantomData<Z> {}
515 | |
// Implements `Zeroize` (field-by-field) and `ZeroizeOnDrop` (marker only)
// for a tuple of the given arity; each `$type_name` doubles as both the
// generic parameter and the destructured binding name.
macro_rules! impl_zeroize_tuple {
    ( $( $type_name:ident ),+ ) => {
        impl<$($type_name: Zeroize),+> Zeroize for ($($type_name,)+) {
            fn zeroize(&mut self) {
                #[allow(non_snake_case)]
                let ($($type_name,)+) = self;
                $($type_name.zeroize());+
            }
        }

        impl<$($type_name: ZeroizeOnDrop),+> ZeroizeOnDrop for ($($type_name,)+) { }
    }
}

// Generic implementations for tuples up to 10 parameters.
impl_zeroize_tuple!(A);
impl_zeroize_tuple!(A, B);
impl_zeroize_tuple!(A, B, C);
impl_zeroize_tuple!(A, B, C, D);
impl_zeroize_tuple!(A, B, C, D, E);
impl_zeroize_tuple!(A, B, C, D, E, F);
impl_zeroize_tuple!(A, B, C, D, E, F, G);
impl_zeroize_tuple!(A, B, C, D, E, F, G, H);
impl_zeroize_tuple!(A, B, C, D, E, F, G, H, I);
impl_zeroize_tuple!(A, B, C, D, E, F, G, H, I, J);
541 | |
#[cfg (feature = "alloc" )]
#[cfg_attr (docsrs, doc(cfg(feature = "alloc" )))]
impl<Z> Zeroize for Vec<Z>
where
    Z: Zeroize,
{
    /// "Best effort" zeroization for `Vec`.
    ///
    /// Ensures the entire capacity of the `Vec` is zeroed. Cannot ensure that
    /// previous reallocations did not leave values on the heap.
    fn zeroize(&mut self) {
        // Zeroize all the initialized elements.
        self.iter_mut().zeroize();

        // Set the Vec's length to 0 and drop all the elements.
        self.clear();

        // Zero the full capacity of `Vec`.
        // (Uses the `[MaybeUninit<Z>]` impl, which blanket-fills the bytes.)
        self.spare_capacity_mut().zeroize();
    }
}

#[cfg (feature = "alloc" )]
#[cfg_attr (docsrs, doc(cfg(feature = "alloc" )))]
impl<Z> ZeroizeOnDrop for Vec<Z> where Z: ZeroizeOnDrop {}
567 | |
#[cfg (feature = "alloc" )]
#[cfg_attr (docsrs, doc(cfg(feature = "alloc" )))]
impl<Z> Zeroize for Box<[Z]>
where
    Z: Zeroize,
{
    /// Unlike `Vec`, `Box<[Z]>` cannot reallocate, so we can be sure that we are not leaving
    /// values on the heap.
    fn zeroize(&mut self) {
        self.iter_mut().zeroize();
    }
}

#[cfg (feature = "alloc" )]
#[cfg_attr (docsrs, doc(cfg(feature = "alloc" )))]
impl<Z> ZeroizeOnDrop for Box<[Z]> where Z: ZeroizeOnDrop {}

#[cfg (feature = "alloc" )]
#[cfg_attr (docsrs, doc(cfg(feature = "alloc" )))]
impl Zeroize for Box<str> {
    /// Zeroizes the boxed string's bytes in place (delegates to the `str` impl).
    fn zeroize(&mut self) {
        self.as_mut().zeroize();
    }
}

#[cfg (feature = "alloc" )]
#[cfg_attr (docsrs, doc(cfg(feature = "alloc" )))]
impl Zeroize for String {
    /// "Best effort" zeroization: delegates to the `Vec<u8>` impl, which
    /// zeroes the full capacity and truncates the string to zero length.
    fn zeroize(&mut self) {
        // Safety: zeroizing leaves the Vec empty, which is trivially valid UTF-8.
        unsafe { self.as_mut_vec() }.zeroize();
    }
}
600 | |
#[cfg (feature = "std" )]
#[cfg_attr (docsrs, doc(cfg(feature = "std" )))]
impl Zeroize for CString {
    /// Zeroizes the `CString`'s backing buffer, leaving it containing exactly
    /// one nul byte, while preserving the original buffer pointer.
    fn zeroize(&mut self) {
        // mem::take uses replace internally to swap the pointer
        // Unfortunately this results in an allocation for a Box::new(&[0]) as CString must
        // contain a trailing zero byte
        let this = mem::take(self);

        // - CString::into_bytes_with_nul calls ::into_vec which takes ownership of the heap pointer
        //   as a Vec<u8>
        // - Calling .zeroize() on the resulting vector clears out the bytes
        // From: https://github.com/RustCrypto/utils/pull/759#issuecomment-1087976570
        let mut buf = this.into_bytes_with_nul();
        buf.zeroize();

        // expect() should never fail, because zeroize() truncates the Vec
        // to length 0, so `buf` contains no interior nul bytes.
        let zeroed = CString::new(buf).expect("buf not truncated" );

        // Replace self by the zeroed CString to maintain the original ptr of the buffer
        let _ = mem::replace(self, zeroed);
    }
}
624 | |
/// `Zeroizing` is a wrapper for any `Z: Zeroize` type which implements a
/// `Drop` handler which zeroizes dropped values.
#[derive (Debug, Default, Eq, PartialEq)]
pub struct Zeroizing<Z: Zeroize>(Z);

impl<Z> Zeroizing<Z>
where
    Z: Zeroize,
{
    /// Move value inside a `Zeroizing` wrapper which ensures it will be
    /// zeroized when it's dropped.
    #[inline (always)]
    pub fn new(value: Z) -> Self {
        Self(value)
    }
}

impl<Z: Zeroize + Clone> Clone for Zeroizing<Z> {
    #[inline (always)]
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }

    #[inline (always)]
    fn clone_from(&mut self, source: &Self) {
        // Zeroize the current contents before they are overwritten, so the
        // old secret is never left behind by the clone.
        self.0.zeroize();
        self.0.clone_from(&source.0);
    }
}

impl<Z> From<Z> for Zeroizing<Z>
where
    Z: Zeroize,
{
    #[inline (always)]
    fn from(value: Z) -> Zeroizing<Z> {
        Zeroizing(value)
    }
}
664 | |
/// Transparent read access to the wrapped value.
impl<Z> ops::Deref for Zeroizing<Z>
where
    Z: Zeroize,
{
    type Target = Z;

    #[inline (always)]
    fn deref(&self) -> &Z {
        &self.0
    }
}

/// Transparent mutable access to the wrapped value.
impl<Z> ops::DerefMut for Zeroizing<Z>
where
    Z: Zeroize,
{
    #[inline (always)]
    fn deref_mut(&mut self) -> &mut Z {
        &mut self.0
    }
}

/// Forward `AsRef` conversions supported by the wrapped type.
impl<T, Z> AsRef<T> for Zeroizing<Z>
where
    T: ?Sized,
    Z: AsRef<T> + Zeroize,
{
    #[inline (always)]
    fn as_ref(&self) -> &T {
        self.0.as_ref()
    }
}

/// Forward `AsMut` conversions supported by the wrapped type.
impl<T, Z> AsMut<T> for Zeroizing<Z>
where
    T: ?Sized,
    Z: AsMut<T> + Zeroize,
{
    #[inline (always)]
    fn as_mut(&mut self) -> &mut T {
        self.0.as_mut()
    }
}
708 | |
/// Manual zeroization simply forwards to the inner value.
impl<Z> Zeroize for Zeroizing<Z>
where
    Z: Zeroize,
{
    fn zeroize(&mut self) {
        self.0.zeroize();
    }
}

/// The `Drop` impl below upholds this marker's contract.
impl<Z> ZeroizeOnDrop for Zeroizing<Z> where Z: Zeroize {}

impl<Z> Drop for Zeroizing<Z>
where
    Z: Zeroize,
{
    // Zeroize the contents whenever the wrapper goes out of scope.
    fn drop(&mut self) {
        self.0.zeroize()
    }
}
728 | |
/// Serialize transparently as the inner value.
#[cfg (feature = "serde" )]
impl<Z> serde::Serialize for Zeroizing<Z>
where
    Z: Zeroize + serde::Serialize,
{
    #[inline (always)]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.0.serialize(serializer)
    }
}

/// Deserialize the inner value and wrap it, so the result zeroizes on drop.
#[cfg (feature = "serde" )]
impl<'de, Z> serde::Deserialize<'de> for Zeroizing<Z>
where
    Z: Zeroize + serde::Deserialize<'de>,
{
    #[inline (always)]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        Ok(Self(Z::deserialize(deserializer)?))
    }
}
756 | |
/// Use fences to prevent accesses from being reordered before this
/// point, which should hopefully help ensure that all accessors
/// see zeroes after this point.
///
/// Note this is a *compiler* fence only: it constrains compiler reordering
/// but emits no CPU fence instruction.
#[inline(always)]
fn atomic_fence() {
    // NOTE: the original call carried `order:` inlay-hint residue
    // (`compiler_fence(order:...)`), which is not valid Rust syntax.
    atomic::compiler_fence(atomic::Ordering::SeqCst);
}
764 | |
/// Perform a volatile write to the destination
#[inline (always)]
fn volatile_write<T: Copy + Sized>(dst: &mut T, src: T) {
    // Safety: `dst` is an exclusive reference, so it is valid, properly
    // aligned, and safe to write a `T` through.
    unsafe { ptr::write_volatile(dst, src) }
}
770 | |
/// Perform a volatile `memset` operation which fills a slice with a value
///
/// Safety:
/// The memory pointed to by `dst` must be a single allocated object that is valid for `count`
/// contiguous elements of `T`.
/// `count` must not be larger than an `isize`.
/// `dst` being offset by `mem::size_of::<T> * count` bytes must not wrap around the address space.
/// Also `dst` must be properly aligned.
#[inline(always)]
unsafe fn volatile_set<T: Copy + Sized>(dst: *mut T, src: T, count: usize) {
    // TODO(tarcieri): use `volatile_set_memory` when stabilized
    //
    // NOTE: the original body carried inlay-hint residue
    // (`for i: usize in`, `dst.add(count:i)`, `write_volatile(dst:ptr, src)`)
    // which is not valid Rust syntax and has been removed.
    for i in 0..count {
        // Safety:
        //
        // This is safe because there is room for at least `count` objects of type `T` in the
        // allocation pointed to by `dst`, because `count <= isize::MAX` and because
        // `dst.add(count)` must not wrap around the address space.
        let ptr = dst.add(i);

        // Safety:
        //
        // This is safe, because the pointer is valid and because `dst` is well aligned for `T` and
        // `ptr` is an offset of `dst` by a multiple of `mem::size_of::<T>()` bytes.
        ptr::write_volatile(ptr, src);
    }
}
797 | |
/// Internal module used as support for `AssertZeroizeOnDrop`.
#[doc (hidden)]
pub mod __internal {
    use super::*;

    /// Auto-deref workaround for deriving `ZeroizeOnDrop`.
    ///
    /// The derive macro calls `(&&mut value).zeroize_or_on_drop()`; method
    /// resolution prefers this `&&mut T` impl when `T: ZeroizeOnDrop`,
    /// otherwise auto-deref falls through to the `AssertZeroize` blanket impl.
    pub trait AssertZeroizeOnDrop {
        fn zeroize_or_on_drop(self);
    }

    impl<T: ZeroizeOnDrop + ?Sized> AssertZeroizeOnDrop for &&mut T {
        // No-op: the type promises to zeroize itself on drop.
        fn zeroize_or_on_drop(self) {}
    }

    /// Auto-deref workaround for deriving `ZeroizeOnDrop`.
    pub trait AssertZeroize {
        fn zeroize_or_on_drop(&mut self);
    }

    impl<T: Zeroize + ?Sized> AssertZeroize for T {
        // Fallback: explicitly zeroize types that don't zeroize on drop.
        fn zeroize_or_on_drop(&mut self) {
            self.zeroize()
        }
    }
}
823 | |