#![no_std]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc(
    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
)]
#![warn(missing_docs, rust_2018_idioms, unused_qualifications)]
| 8 | |
| 9 | //! Securely zero memory with a simple trait ([`Zeroize`]) built on stable Rust |
| 10 | //! primitives which guarantee the operation will not be "optimized away". |
| 11 | //! |
| 12 | //! ## About |
| 13 | //! |
| 14 | //! [Zeroing memory securely is hard] - compilers optimize for performance, and |
| 15 | //! in doing so they love to "optimize away" unnecessary zeroing calls. There are |
| 16 | //! many documented "tricks" to attempt to avoid these optimizations and ensure |
| 17 | //! that a zeroing routine is performed reliably. |
| 18 | //! |
| 19 | //! This crate isn't about tricks: it uses [`core::ptr::write_volatile`] |
| 20 | //! and [`core::sync::atomic`] memory fences to provide easy-to-use, portable |
| 21 | //! zeroing behavior which works on all of Rust's core number types and slices |
| 22 | //! thereof, implemented in pure Rust with no usage of FFI or assembly. |
| 23 | //! |
| 24 | //! - No insecure fallbacks! |
| 25 | //! - No dependencies! |
| 26 | //! - No FFI or inline assembly! **WASM friendly** (and tested)! |
| 27 | //! - `#![no_std]` i.e. **embedded-friendly**! |
| 28 | //! - No functionality besides securely zeroing memory! |
| 29 | //! - (Optional) Custom derive support for zeroing complex structures |
| 30 | //! |
| 31 | //! ## Minimum Supported Rust Version |
| 32 | //! |
| 33 | //! Requires Rust **1.72** or newer. |
| 34 | //! |
//! We reserve the right to change the MSRV in the future (i.e. MSRV is out of scope
//! for this crate's SemVer guarantees); however, when we do, the change will be
//! accompanied by a minor version bump.
| 38 | //! |
| 39 | //! ## Usage |
| 40 | //! |
| 41 | //! ``` |
| 42 | //! use zeroize::Zeroize; |
| 43 | //! |
| 44 | //! // Protip: don't embed secrets in your source code. |
| 45 | //! // This is just an example. |
//! let mut secret = b"Air shield password: 1,2,3,4,5".to_vec();
| 47 | //! // [ ... ] open the air shield here |
| 48 | //! |
| 49 | //! // Now that we're done using the secret, zero it out. |
| 50 | //! secret.zeroize(); |
| 51 | //! ``` |
| 52 | //! |
| 53 | //! The [`Zeroize`] trait is impl'd on all of Rust's core scalar types including |
| 54 | //! integers, floats, `bool`, and `char`. |
| 55 | //! |
| 56 | //! Additionally, it's implemented on slices and `IterMut`s of the above types. |
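//!
//! For example, zeroizing a byte slice (a minimal sketch of the impls described above):
//!
//! ```
//! use zeroize::Zeroize;
//!
//! let mut bytes = [0xAAu8; 8];
//! bytes.as_mut_slice().zeroize();
//! assert_eq!(bytes, [0u8; 8]);
//! ```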
| 57 | //! |
| 58 | //! When the `alloc` feature is enabled (which it is by default), it's also |
| 59 | //! impl'd for `Vec<T>` for the above types as well as `String`, where it provides |
| 60 | //! [`Vec::clear`] / [`String::clear`]-like behavior (truncating to zero-length) |
| 61 | //! but ensures the backing memory is securely zeroed with some caveats. |
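//!
//! For example, zeroizing a `String` (assuming the default `alloc` feature) clears it
//! and wipes its backing buffer:
//!
//! ```
//! # #[cfg(feature = "alloc")]
//! # {
//! use zeroize::Zeroize;
//!
//! let mut secret = String::from("hunter2");
//! secret.zeroize();
//! assert!(secret.is_empty());
//! # }
//! ```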
| 62 | //! |
| 63 | //! With the `std` feature enabled (which it is **not** by default), [`Zeroize`] |
| 64 | //! is also implemented for [`CString`]. After calling `zeroize()` on a `CString`, |
| 65 | //! its internal buffer will contain exactly one nul byte. The backing |
| 66 | //! memory is zeroed by converting it to a `Vec<u8>` and back into a `CString`. |
| 67 | //! (NOTE: see "Stack/Heap Zeroing Notes" for important `Vec`/`String`/`CString` details) |
| 68 | //! |
| 69 | //! [`CString`]: https://doc.rust-lang.org/std/ffi/struct.CString.html |
| 70 | //! |
| 71 | //! The [`DefaultIsZeroes`] marker trait can be impl'd on types which also |
| 72 | //! impl [`Default`], which implements [`Zeroize`] by overwriting a value with |
| 73 | //! the default value. |
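//!
//! For example, a plain `Copy + Default` type can opt in like this:
//!
//! ```
//! use zeroize::{DefaultIsZeroes, Zeroize};
//!
//! #[derive(Copy, Clone, Default)]
//! struct Counter(u64);
//!
//! impl DefaultIsZeroes for Counter {}
//!
//! let mut counter = Counter(42);
//! counter.zeroize();
//! assert_eq!(counter.0, 0);
//! ```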
| 74 | //! |
| 75 | //! ## Custom Derive Support |
| 76 | //! |
| 77 | //! This crate has custom derive support for the `Zeroize` trait, |
| 78 | //! gated under the `zeroize` crate's `zeroize_derive` Cargo feature, |
| 79 | //! which automatically calls `zeroize()` on all members of a struct |
| 80 | //! or tuple struct. |
| 81 | //! |
| 82 | //! Attributes supported for `Zeroize`: |
| 83 | //! |
| 84 | //! On the item level: |
//! - `#[zeroize(drop)]`: *deprecated*; use `ZeroizeOnDrop` instead
| 86 | //! - `#[zeroize(bound = "T: MyTrait")]`: this replaces any trait bounds |
| 87 | //! inferred by zeroize |
| 88 | //! |
| 89 | //! On the field level: |
| 90 | //! - `#[zeroize(skip)]`: skips this field or variant when calling `zeroize()` |
| 91 | //! |
| 92 | //! Attributes supported for `ZeroizeOnDrop`: |
| 93 | //! |
| 94 | //! On the field level: |
| 95 | //! - `#[zeroize(skip)]`: skips this field or variant when calling `zeroize()` |
| 96 | //! |
| 97 | //! Example which derives `Drop`: |
| 98 | //! |
| 99 | //! ``` |
//! # #[cfg(feature = "zeroize_derive")]
| 101 | //! # { |
| 102 | //! use zeroize::{Zeroize, ZeroizeOnDrop}; |
| 103 | //! |
| 104 | //! // This struct will be zeroized on drop |
| 105 | //! #[derive(Zeroize, ZeroizeOnDrop)] |
| 106 | //! struct MyStruct([u8; 32]); |
| 107 | //! # } |
| 108 | //! ``` |
| 109 | //! |
//! Example which does not derive `Drop` (useful for e.g. `Copy` types):
| 111 | //! |
| 112 | //! ``` |
//! # #[cfg(feature = "zeroize_derive")]
| 114 | //! # { |
| 115 | //! use zeroize::Zeroize; |
| 116 | //! |
| 117 | //! // This struct will *NOT* be zeroized on drop |
| 118 | //! #[derive(Copy, Clone, Zeroize)] |
| 119 | //! struct MyStruct([u8; 32]); |
| 120 | //! # } |
| 121 | //! ``` |
| 122 | //! |
| 123 | //! Example which only derives `Drop`: |
| 124 | //! |
| 125 | //! ``` |
//! # #[cfg(feature = "zeroize_derive")]
| 127 | //! # { |
| 128 | //! use zeroize::ZeroizeOnDrop; |
| 129 | //! |
| 130 | //! // This struct will be zeroized on drop |
| 131 | //! #[derive(ZeroizeOnDrop)] |
| 132 | //! struct MyStruct([u8; 32]); |
| 133 | //! # } |
| 134 | //! ``` |
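//!
//! Example which skips a field using the `#[zeroize(skip)]` attribute described above:
//!
//! ```
//! # #[cfg(feature = "zeroize_derive")]
//! # {
//! use zeroize::Zeroize;
//!
//! #[derive(Zeroize)]
//! struct SecretWithMetadata {
//!     secret: [u8; 32],
//!     // This field is left untouched by `zeroize()`
//!     #[zeroize(skip)]
//!     len_hint: usize,
//! }
//! # }
//! ```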
| 135 | //! |
| 136 | //! ## `Zeroizing<Z>`: wrapper for zeroizing arbitrary values on drop |
| 137 | //! |
| 138 | //! `Zeroizing<Z: Zeroize>` is a generic wrapper type that impls `Deref` |
| 139 | //! and `DerefMut`, allowing access to an inner value of type `Z`, and also |
| 140 | //! impls a `Drop` handler which calls `zeroize()` on its contents: |
| 141 | //! |
| 142 | //! ``` |
| 143 | //! use zeroize::Zeroizing; |
| 144 | //! |
| 145 | //! fn use_secret() { |
| 146 | //! let mut secret = Zeroizing::new([0u8; 5]); |
| 147 | //! |
| 148 | //! // Set the air shield password |
| 149 | //! // Protip (again): don't embed secrets in your source code. |
| 150 | //! secret.copy_from_slice(&[1, 2, 3, 4, 5]); |
| 151 | //! assert_eq!(secret.as_ref(), &[1, 2, 3, 4, 5]); |
| 152 | //! |
| 153 | //! // The contents of `secret` will be automatically zeroized on drop |
| 154 | //! } |
| 155 | //! |
| 156 | //! # use_secret() |
| 157 | //! ``` |
| 158 | //! |
| 159 | //! ## What guarantees does this crate provide? |
| 160 | //! |
| 161 | //! This crate guarantees the following: |
| 162 | //! |
| 163 | //! 1. The zeroing operation can't be "optimized away" by the compiler. |
| 164 | //! 2. All subsequent reads to memory will see "zeroized" values. |
| 165 | //! |
| 166 | //! LLVM's volatile semantics ensure #1 is true. |
| 167 | //! |
| 168 | //! Additionally, thanks to work by the [Unsafe Code Guidelines Working Group], |
| 169 | //! we can now fairly confidently say #2 is true as well. Previously there were |
| 170 | //! worries that the approach used by this crate (mixing volatile and |
| 171 | //! non-volatile accesses) was undefined behavior due to language contained |
| 172 | //! in the documentation for `write_volatile`, however after some discussion |
| 173 | //! [these remarks have been removed] and the specific usage pattern in this |
| 174 | //! crate is considered to be well-defined. |
| 175 | //! |
| 176 | //! Additionally this crate leverages [`core::sync::atomic::compiler_fence`] |
| 177 | //! with the strictest ordering |
| 178 | //! ([`Ordering::SeqCst`]) as a |
| 179 | //! precaution to help ensure reads are not reordered before memory has been |
| 180 | //! zeroed. |
| 181 | //! |
| 182 | //! All of that said, there is still potential for microarchitectural attacks |
| 183 | //! (ala Spectre/Meltdown) to leak "zeroized" secrets through covert channels. |
| 184 | //! This crate makes no guarantees that zeroized values cannot be leaked |
| 185 | //! through such channels, as they represent flaws in the underlying hardware. |
| 186 | //! |
| 187 | //! ## Stack/Heap Zeroing Notes |
| 188 | //! |
| 189 | //! This crate can be used to zero values from either the stack or the heap. |
| 190 | //! |
| 191 | //! However, be aware several operations in Rust can unintentionally leave |
| 192 | //! copies of data in memory. This includes but is not limited to: |
| 193 | //! |
| 194 | //! - Moves and [`Copy`] |
| 195 | //! - Heap reallocation when using [`Vec`] and [`String`] |
| 196 | //! - Borrowers of a reference making copies of the data |
| 197 | //! |
| 198 | //! [`Pin`][`core::pin::Pin`] can be leveraged in conjunction with this crate |
| 199 | //! to ensure data kept on the stack isn't moved. |
| 200 | //! |
| 201 | //! The `Zeroize` impls for `Vec`, `String` and `CString` zeroize the entire |
| 202 | //! capacity of their backing buffer, but cannot guarantee copies of the data |
| 203 | //! were not previously made by buffer reallocation. It's therefore important |
| 204 | //! when attempting to zeroize such buffers to initialize them to the correct |
| 205 | //! capacity, and take care to prevent subsequent reallocation. |
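//!
//! For example, reserving the needed capacity up front (a sketch of the pattern above):
//!
//! ```
//! # #[cfg(feature = "alloc")]
//! # {
//! use zeroize::Zeroize;
//!
//! // Allocate enough space up front so the extend below never reallocates
//! let mut secret: Vec<u8> = Vec::with_capacity(32);
//! secret.extend_from_slice(&[0x42; 32]);
//!
//! // Zeroes the entire backing buffer, then truncates to zero length
//! secret.zeroize();
//! assert!(secret.is_empty());
//! # }
//! ```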
| 206 | //! |
| 207 | //! The `secrecy` crate provides higher-level abstractions for eliminating |
| 208 | //! usage patterns which can cause reallocations: |
| 209 | //! |
| 210 | //! <https://crates.io/crates/secrecy> |
| 211 | //! |
| 212 | //! ## What about: clearing registers, mlock, mprotect, etc? |
| 213 | //! |
| 214 | //! This crate is focused on providing simple, unobtrusive support for reliably |
| 215 | //! zeroing memory using the best approach possible on stable Rust. |
| 216 | //! |
| 217 | //! Clearing registers is a difficult problem that can't easily be solved by |
| 218 | //! something like a crate, and requires either inline ASM or rustc support. |
| 219 | //! See <https://github.com/rust-lang/rust/issues/17046> for background on |
| 220 | //! this particular problem. |
| 221 | //! |
| 222 | //! Other memory protection mechanisms are interesting and useful, but often |
| 223 | //! overkill (e.g. defending against RAM scraping or attackers with swap access). |
| 224 | //! In as much as there may be merit to these approaches, there are also many |
| 225 | //! other crates that already implement more sophisticated memory protections. |
| 226 | //! Such protections are explicitly out-of-scope for this crate. |
| 227 | //! |
| 228 | //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote |
| 229 | //! it in the most unobtrusive manner possible. This includes omitting complex |
| 230 | //! `unsafe` memory protection systems and just trying to make the best memory |
| 231 | //! zeroing crate available. |
| 232 | //! |
| 233 | //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html |
| 234 | //! [Unsafe Code Guidelines Working Group]: https://github.com/rust-lang/unsafe-code-guidelines |
| 235 | //! [these remarks have been removed]: https://github.com/rust-lang/rust/pull/60972 |
| 236 | //! [good cryptographic hygiene]: https://github.com/veorq/cryptocoding#clean-memory-of-secret-data |
| 237 | //! [`Ordering::SeqCst`]: core::sync::atomic::Ordering::SeqCst |
| 238 | |
#[cfg(feature = "alloc")]
| 240 | extern crate alloc; |
| 241 | |
#[cfg(feature = "std")]
| 243 | extern crate std; |
| 244 | |
#[cfg(feature = "zeroize_derive")]
| 246 | pub use zeroize_derive::{Zeroize, ZeroizeOnDrop}; |
| 247 | |
#[cfg(target_arch = "aarch64")]
| 249 | mod aarch64; |
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
| 251 | mod x86; |
| 252 | |
| 253 | use core::{ |
| 254 | marker::{PhantomData, PhantomPinned}, |
| 255 | mem::{self, MaybeUninit}, |
| 256 | num::{ |
| 257 | self, NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, |
| 258 | NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, |
| 259 | }, |
| 260 | ops, ptr, |
| 261 | slice::IterMut, |
| 262 | sync::atomic, |
| 263 | }; |
| 264 | |
#[cfg(feature = "alloc")]
| 266 | use alloc::{boxed::Box, string::String, vec::Vec}; |
| 267 | |
#[cfg(feature = "std")]
| 269 | use std::ffi::CString; |
| 270 | |
| 271 | /// Trait for securely erasing values from memory. |
| 272 | pub trait Zeroize { |
| 273 | /// Zero out this object from memory using Rust intrinsics which ensure the |
| 274 | /// zeroization operation is not "optimized away" by the compiler. |
| 275 | fn zeroize(&mut self); |
| 276 | } |
| 277 | |
| 278 | /// Marker trait signifying that this type will [`Zeroize::zeroize`] itself on [`Drop`]. |
| 279 | pub trait ZeroizeOnDrop {} |
| 280 | |
| 281 | /// Marker trait for types whose [`Default`] is the desired zeroization result |
| 282 | pub trait DefaultIsZeroes: Copy + Default + Sized {} |
| 283 | |
| 284 | /// Fallible trait for representing cases where zeroization may or may not be |
| 285 | /// possible. |
| 286 | /// |
| 287 | /// This is primarily useful for scenarios like reference counted data, where |
| 288 | /// zeroization is only possible when the last reference is dropped. |
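///
/// # Example
///
/// A minimal sketch for reference-counted data (the `SharedSecret` wrapper here is
/// illustrative only, not part of this crate):
///
/// ```
/// use std::sync::Arc;
/// use zeroize::{TryZeroize, Zeroize};
///
/// struct SharedSecret(Arc<[u8; 32]>);
///
/// impl TryZeroize for SharedSecret {
///     fn try_zeroize(&mut self) -> bool {
///         // Zeroization is only possible when no other references exist
///         match Arc::get_mut(&mut self.0) {
///             Some(bytes) => {
///                 bytes.zeroize();
///                 true
///             }
///             None => false,
///         }
///     }
/// }
///
/// let mut secret = SharedSecret(Arc::new([1u8; 32]));
/// assert!(secret.try_zeroize());
/// ```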
| 289 | pub trait TryZeroize { |
| 290 | /// Try to zero out this object from memory using Rust intrinsics which |
| 291 | /// ensure the zeroization operation is not "optimized away" by the |
| 292 | /// compiler. |
    #[must_use]
| 294 | fn try_zeroize(&mut self) -> bool; |
| 295 | } |
| 296 | |
| 297 | impl<Z> Zeroize for Z |
| 298 | where |
| 299 | Z: DefaultIsZeroes, |
| 300 | { |
| 301 | fn zeroize(&mut self) { |
| 302 | volatile_write(self, Z::default()); |
| 303 | atomic_fence(); |
| 304 | } |
| 305 | } |
| 306 | |
| 307 | macro_rules! impl_zeroize_with_default { |
| 308 | ($($type:ty),+) => { |
| 309 | $(impl DefaultIsZeroes for $type {})+ |
| 310 | }; |
| 311 | } |
| 312 | |
| 313 | #[rustfmt::skip] |
| 314 | impl_zeroize_with_default! { |
| 315 | PhantomPinned, (), bool, char, |
| 316 | f32, f64, |
| 317 | i8, i16, i32, i64, i128, isize, |
| 318 | u8, u16, u32, u64, u128, usize |
| 319 | } |
| 320 | |
| 321 | /// `PhantomPinned` is zero sized so provide a ZeroizeOnDrop implementation. |
| 322 | impl ZeroizeOnDrop for PhantomPinned {} |
| 323 | |
| 324 | /// `()` is zero sized so provide a ZeroizeOnDrop implementation. |
| 325 | impl ZeroizeOnDrop for () {} |
| 326 | |
| 327 | macro_rules! impl_zeroize_for_non_zero { |
| 328 | ($($type:ty),+) => { |
| 329 | $( |
| 330 | impl Zeroize for $type { |
| 331 | fn zeroize(&mut self) { |
| 332 | const ONE: $type = match <$type>::new(1) { |
| 333 | Some(one) => one, |
| 334 | None => unreachable!(), |
| 335 | }; |
| 336 | volatile_write(self, ONE); |
| 337 | atomic_fence(); |
| 338 | } |
| 339 | } |
| 340 | )+ |
| 341 | }; |
| 342 | } |
| 343 | |
| 344 | impl_zeroize_for_non_zero!( |
| 345 | NonZeroI8, |
| 346 | NonZeroI16, |
| 347 | NonZeroI32, |
| 348 | NonZeroI64, |
| 349 | NonZeroI128, |
| 350 | NonZeroIsize, |
| 351 | NonZeroU8, |
| 352 | NonZeroU16, |
| 353 | NonZeroU32, |
| 354 | NonZeroU64, |
| 355 | NonZeroU128, |
| 356 | NonZeroUsize |
| 357 | ); |
| 358 | |
| 359 | impl<Z> Zeroize for num::Wrapping<Z> |
| 360 | where |
| 361 | Z: Zeroize, |
| 362 | { |
| 363 | fn zeroize(&mut self) { |
| 364 | self.0.zeroize(); |
| 365 | } |
| 366 | } |
| 367 | |
| 368 | /// Impl [`Zeroize`] on arrays of types that impl [`Zeroize`]. |
| 369 | impl<Z, const N: usize> Zeroize for [Z; N] |
| 370 | where |
| 371 | Z: Zeroize, |
| 372 | { |
| 373 | fn zeroize(&mut self) { |
| 374 | self.iter_mut().zeroize(); |
| 375 | } |
| 376 | } |
| 377 | |
| 378 | /// Impl [`ZeroizeOnDrop`] on arrays of types that impl [`ZeroizeOnDrop`]. |
| 379 | impl<Z, const N: usize> ZeroizeOnDrop for [Z; N] where Z: ZeroizeOnDrop {} |
| 380 | |
| 381 | impl<Z> Zeroize for IterMut<'_, Z> |
| 382 | where |
| 383 | Z: Zeroize, |
| 384 | { |
| 385 | fn zeroize(&mut self) { |
        for elem in self {
| 387 | elem.zeroize(); |
| 388 | } |
| 389 | } |
| 390 | } |
| 391 | |
| 392 | impl<Z> Zeroize for Option<Z> |
| 393 | where |
| 394 | Z: Zeroize, |
| 395 | { |
| 396 | fn zeroize(&mut self) { |
| 397 | if let Some(value) = self { |
| 398 | value.zeroize(); |
| 399 | |
| 400 | // Ensures self is None and that the value was dropped. Without the take, the drop |
| 401 | // of the (zeroized) value isn't called, which might lead to a leak or other |
| 402 | // unexpected behavior. For example, if this were Option<Vec<T>>, the above call to |
            // zeroize would not free the allocated memory, but the `take` call will.
| 404 | self.take(); |
| 405 | } |
| 406 | |
| 407 | // Ensure that if the `Option` were previously `Some` but a value was copied/moved out |
| 408 | // that the remaining space in the `Option` is zeroized. |
| 409 | // |
| 410 | // Safety: |
| 411 | // |
| 412 | // The memory pointed to by `self` is valid for `mem::size_of::<Self>()` bytes. |
| 413 | // It is also properly aligned, because `u8` has an alignment of `1`. |
| 414 | unsafe { |
| 415 | volatile_set((self as *mut Self).cast::<u8>(), 0, mem::size_of::<Self>()); |
| 416 | } |
| 417 | |
| 418 | // Ensures self is overwritten with the `None` bit pattern. volatile_write can't be |
| 419 | // used because Option<Z> is not copy. |
| 420 | // |
| 421 | // Safety: |
| 422 | // |
| 423 | // self is safe to replace with `None`, which the take() call above should have |
| 424 | // already done semantically. Any value which needed to be dropped will have been |
| 425 | // done so by take(). |
| 426 | unsafe { ptr::write_volatile(self, None) } |
| 427 | |
| 428 | atomic_fence(); |
| 429 | } |
| 430 | } |
| 431 | |
| 432 | impl<Z> ZeroizeOnDrop for Option<Z> where Z: ZeroizeOnDrop {} |
| 433 | |
| 434 | /// Impl [`Zeroize`] on [`MaybeUninit`] types. |
| 435 | /// |
| 436 | /// This fills the memory with zeroes. |
/// Note that this ignores invariants that `Z` might have, because
| 438 | /// [`MaybeUninit`] removes all invariants. |
| 439 | impl<Z> Zeroize for MaybeUninit<Z> { |
| 440 | fn zeroize(&mut self) { |
| 441 | // Safety: |
| 442 | // `MaybeUninit` is valid for any byte pattern, including zeros. |
        unsafe { ptr::write_volatile(self, MaybeUninit::zeroed()) }
| 444 | atomic_fence(); |
| 445 | } |
| 446 | } |
| 447 | |
| 448 | /// Impl [`Zeroize`] on slices of [`MaybeUninit`] types. |
| 449 | /// |
/// This impl can eventually be optimized using a memset intrinsic,
| 451 | /// such as [`core::intrinsics::volatile_set_memory`]. |
| 452 | /// |
| 453 | /// This fills the slice with zeroes. |
| 454 | /// |
/// Note that this ignores invariants that `Z` might have, because
| 456 | /// [`MaybeUninit`] removes all invariants. |
| 457 | impl<Z> Zeroize for [MaybeUninit<Z>] { |
| 458 | fn zeroize(&mut self) { |
        let ptr = self.as_mut_ptr().cast::<MaybeUninit<u8>>();
| 460 | let size: usize = self.len().checked_mul(mem::size_of::<Z>()).unwrap(); |
| 461 | assert!(size <= isize::MAX as usize); |
| 462 | |
| 463 | // Safety: |
| 464 | // |
        // This is safe, because every valid pointer is well aligned for u8,
        // it is backed by a single allocated object for at least `self.len() * size_of::<Z>()` bytes,
        // and 0 is a valid value for `MaybeUninit<Z>`.
        // The memory of the slice should not wrap around the address space.
        unsafe { volatile_set(ptr, MaybeUninit::zeroed(), size) }
| 470 | atomic_fence(); |
| 471 | } |
| 472 | } |
| 473 | |
| 474 | /// Impl [`Zeroize`] on slices of types that can be zeroized with [`Default`]. |
| 475 | /// |
| 476 | /// This impl can eventually be optimized using an memset intrinsic, |
| 477 | /// such as [`core::intrinsics::volatile_set_memory`]. For that reason the |
| 478 | /// blanket impl on slices is bounded by [`DefaultIsZeroes`]. |
| 479 | /// |
| 480 | /// To zeroize a mut slice of `Z: Zeroize` which does not impl |
| 481 | /// [`DefaultIsZeroes`], call `iter_mut().zeroize()`. |
| 482 | impl<Z> Zeroize for [Z] |
| 483 | where |
| 484 | Z: DefaultIsZeroes, |
| 485 | { |
| 486 | fn zeroize(&mut self) { |
| 487 | assert!(self.len() <= isize::MAX as usize); |
| 488 | |
| 489 | // Safety: |
| 490 | // |
| 491 | // This is safe, because the slice is well aligned and is backed by a single allocated |
| 492 | // object for at least `self.len()` elements of type `Z`. |
| 493 | // `self.len()` is also not larger than an `isize`, because of the assertion above. |
| 494 | // The memory of the slice should not wrap around the address space. |
| 495 | unsafe { volatile_set(self.as_mut_ptr(), Z::default(), self.len()) }; |
| 496 | atomic_fence(); |
| 497 | } |
| 498 | } |
| 499 | |
| 500 | impl Zeroize for str { |
| 501 | fn zeroize(&mut self) { |
| 502 | // Safety: |
| 503 | // A zeroized byte slice is a valid UTF-8 string. |
| 504 | unsafe { self.as_bytes_mut().zeroize() } |
| 505 | } |
| 506 | } |
| 507 | |
| 508 | /// [`PhantomData`] is always zero sized so provide a [`Zeroize`] implementation. |
| 509 | impl<Z> Zeroize for PhantomData<Z> { |
| 510 | fn zeroize(&mut self) {} |
| 511 | } |
| 512 | |
/// [`PhantomData`] is always zero sized so provide a [`ZeroizeOnDrop`] implementation.
| 514 | impl<Z> ZeroizeOnDrop for PhantomData<Z> {} |
| 515 | |
| 516 | macro_rules! impl_zeroize_tuple { |
| 517 | ( $( $type_name:ident ),+ ) => { |
| 518 | impl<$($type_name: Zeroize),+> Zeroize for ($($type_name,)+) { |
| 519 | fn zeroize(&mut self) { |
| 520 | #[allow(non_snake_case)] |
| 521 | let ($($type_name,)+) = self; |
| 522 | $($type_name.zeroize());+ |
| 523 | } |
| 524 | } |
| 525 | |
| 526 | impl<$($type_name: ZeroizeOnDrop),+> ZeroizeOnDrop for ($($type_name,)+) { } |
| 527 | } |
| 528 | } |
| 529 | |
| 530 | // Generic implementations for tuples up to 10 parameters. |
| 531 | impl_zeroize_tuple!(A); |
| 532 | impl_zeroize_tuple!(A, B); |
| 533 | impl_zeroize_tuple!(A, B, C); |
| 534 | impl_zeroize_tuple!(A, B, C, D); |
| 535 | impl_zeroize_tuple!(A, B, C, D, E); |
| 536 | impl_zeroize_tuple!(A, B, C, D, E, F); |
| 537 | impl_zeroize_tuple!(A, B, C, D, E, F, G); |
| 538 | impl_zeroize_tuple!(A, B, C, D, E, F, G, H); |
| 539 | impl_zeroize_tuple!(A, B, C, D, E, F, G, H, I); |
| 540 | impl_zeroize_tuple!(A, B, C, D, E, F, G, H, I, J); |
| 541 | |
#[cfg(feature = "alloc")]
| 543 | impl<Z> Zeroize for Vec<Z> |
| 544 | where |
| 545 | Z: Zeroize, |
| 546 | { |
| 547 | /// "Best effort" zeroization for `Vec`. |
| 548 | /// |
| 549 | /// Ensures the entire capacity of the `Vec` is zeroed. Cannot ensure that |
| 550 | /// previous reallocations did not leave values on the heap. |
| 551 | fn zeroize(&mut self) { |
| 552 | // Zeroize all the initialized elements. |
| 553 | self.iter_mut().zeroize(); |
| 554 | |
| 555 | // Set the Vec's length to 0 and drop all the elements. |
| 556 | self.clear(); |
| 557 | |
| 558 | // Zero the full capacity of `Vec`. |
| 559 | self.spare_capacity_mut().zeroize(); |
| 560 | } |
| 561 | } |
| 562 | |
#[cfg(feature = "alloc")]
| 564 | impl<Z> ZeroizeOnDrop for Vec<Z> where Z: ZeroizeOnDrop {} |
| 565 | |
#[cfg(feature = "alloc")]
| 567 | impl<Z> Zeroize for Box<[Z]> |
| 568 | where |
| 569 | Z: Zeroize, |
| 570 | { |
| 571 | /// Unlike `Vec`, `Box<[Z]>` cannot reallocate, so we can be sure that we are not leaving |
| 572 | /// values on the heap. |
| 573 | fn zeroize(&mut self) { |
| 574 | self.iter_mut().zeroize(); |
| 575 | } |
| 576 | } |
| 577 | |
#[cfg(feature = "alloc")]
| 579 | impl<Z> ZeroizeOnDrop for Box<[Z]> where Z: ZeroizeOnDrop {} |
| 580 | |
#[cfg(feature = "alloc")]
| 582 | impl Zeroize for Box<str> { |
| 583 | fn zeroize(&mut self) { |
| 584 | self.as_mut().zeroize(); |
| 585 | } |
| 586 | } |
| 587 | |
#[cfg(feature = "alloc")]
| 589 | impl Zeroize for String { |
| 590 | fn zeroize(&mut self) { |
| 591 | unsafe { self.as_mut_vec() }.zeroize(); |
| 592 | } |
| 593 | } |
| 594 | |
#[cfg(feature = "std")]
| 596 | impl Zeroize for CString { |
| 597 | fn zeroize(&mut self) { |
| 598 | // mem::take uses replace internally to swap the pointer |
| 599 | // Unfortunately this results in an allocation for a Box::new(&[0]) as CString must |
| 600 | // contain a trailing zero byte |
| 601 | let this = mem::take(self); |
| 602 | |
| 603 | // - CString::into_bytes_with_nul calls ::into_vec which takes ownership of the heap pointer |
| 604 | // as a Vec<u8> |
| 605 | // - Calling .zeroize() on the resulting vector clears out the bytes |
| 606 | // From: https://github.com/RustCrypto/utils/pull/759#issuecomment-1087976570 |
| 607 | let mut buf = this.into_bytes_with_nul(); |
| 608 | buf.zeroize(); |
| 609 | |
| 610 | // expect() should never fail, because zeroize() truncates the Vec |
        let zeroed = CString::new(buf).expect("buf not truncated");
| 612 | |
| 613 | // Replace self by the zeroed CString to maintain the original ptr of the buffer |
| 614 | let _ = mem::replace(self, zeroed); |
| 615 | } |
| 616 | } |
| 617 | |
/// `Zeroizing` is a wrapper for any `Z: Zeroize` type which implements a
| 619 | /// `Drop` handler which zeroizes dropped values. |
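///
/// Example (the wrapped value is zeroized when the wrapper is dropped):
///
/// ```
/// use zeroize::Zeroizing;
///
/// let secret = Zeroizing::new([1u8, 2, 3, 4]);
/// assert_eq!(*secret, [1, 2, 3, 4]);
/// // `secret` is zeroized here, when it goes out of scope
/// ```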
#[derive(Debug, Default, Eq, PartialEq)]
| 621 | pub struct Zeroizing<Z: Zeroize>(Z); |
| 622 | |
| 623 | impl<Z> Zeroizing<Z> |
| 624 | where |
| 625 | Z: Zeroize, |
| 626 | { |
| 627 | /// Move value inside a `Zeroizing` wrapper which ensures it will be |
| 628 | /// zeroized when it's dropped. |
    #[inline(always)]
| 630 | pub fn new(value: Z) -> Self { |
| 631 | Self(value) |
| 632 | } |
| 633 | } |
| 634 | |
| 635 | impl<Z: Zeroize + Clone> Clone for Zeroizing<Z> { |
    #[inline(always)]
| 637 | fn clone(&self) -> Self { |
| 638 | Self(self.0.clone()) |
| 639 | } |
| 640 | |
    #[inline(always)]
| 642 | fn clone_from(&mut self, source: &Self) { |
| 643 | self.0.zeroize(); |
| 644 | self.0.clone_from(&source.0); |
| 645 | } |
| 646 | } |
| 647 | |
| 648 | impl<Z> From<Z> for Zeroizing<Z> |
| 649 | where |
| 650 | Z: Zeroize, |
| 651 | { |
    #[inline(always)]
| 653 | fn from(value: Z) -> Zeroizing<Z> { |
| 654 | Zeroizing(value) |
| 655 | } |
| 656 | } |
| 657 | |
| 658 | impl<Z> ops::Deref for Zeroizing<Z> |
| 659 | where |
| 660 | Z: Zeroize, |
| 661 | { |
| 662 | type Target = Z; |
| 663 | |
    #[inline(always)]
| 665 | fn deref(&self) -> &Z { |
| 666 | &self.0 |
| 667 | } |
| 668 | } |
| 669 | |
| 670 | impl<Z> ops::DerefMut for Zeroizing<Z> |
| 671 | where |
| 672 | Z: Zeroize, |
| 673 | { |
    #[inline(always)]
| 675 | fn deref_mut(&mut self) -> &mut Z { |
| 676 | &mut self.0 |
| 677 | } |
| 678 | } |
| 679 | |
| 680 | impl<T, Z> AsRef<T> for Zeroizing<Z> |
| 681 | where |
| 682 | T: ?Sized, |
| 683 | Z: AsRef<T> + Zeroize, |
| 684 | { |
    #[inline(always)]
| 686 | fn as_ref(&self) -> &T { |
| 687 | self.0.as_ref() |
| 688 | } |
| 689 | } |
| 690 | |
| 691 | impl<T, Z> AsMut<T> for Zeroizing<Z> |
| 692 | where |
| 693 | T: ?Sized, |
| 694 | Z: AsMut<T> + Zeroize, |
| 695 | { |
    #[inline(always)]
| 697 | fn as_mut(&mut self) -> &mut T { |
| 698 | self.0.as_mut() |
| 699 | } |
| 700 | } |
| 701 | |
| 702 | impl<Z> Zeroize for Zeroizing<Z> |
| 703 | where |
| 704 | Z: Zeroize, |
| 705 | { |
| 706 | fn zeroize(&mut self) { |
| 707 | self.0.zeroize(); |
| 708 | } |
| 709 | } |
| 710 | |
| 711 | impl<Z> ZeroizeOnDrop for Zeroizing<Z> where Z: Zeroize {} |
| 712 | |
| 713 | impl<Z> Drop for Zeroizing<Z> |
| 714 | where |
| 715 | Z: Zeroize, |
| 716 | { |
| 717 | fn drop(&mut self) { |
| 718 | self.0.zeroize() |
| 719 | } |
| 720 | } |
| 721 | |
#[cfg(feature = "serde")]
| 723 | impl<Z> serde::Serialize for Zeroizing<Z> |
| 724 | where |
| 725 | Z: Zeroize + serde::Serialize, |
| 726 | { |
    #[inline(always)]
| 728 | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> |
| 729 | where |
| 730 | S: serde::Serializer, |
| 731 | { |
| 732 | self.0.serialize(serializer) |
| 733 | } |
| 734 | } |
| 735 | |
#[cfg(feature = "serde")]
| 737 | impl<'de, Z> serde::Deserialize<'de> for Zeroizing<Z> |
| 738 | where |
| 739 | Z: Zeroize + serde::Deserialize<'de>, |
| 740 | { |
    #[inline(always)]
| 742 | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> |
| 743 | where |
| 744 | D: serde::Deserializer<'de>, |
| 745 | { |
| 746 | Ok(Self(Z::deserialize(deserializer)?)) |
| 747 | } |
| 748 | } |
| 749 | |
| 750 | /// Use fences to prevent accesses from being reordered before this |
| 751 | /// point, which should hopefully help ensure that all accessors |
| 752 | /// see zeroes after this point. |
#[inline(always)]
fn atomic_fence() {
    atomic::compiler_fence(atomic::Ordering::SeqCst);
| 756 | } |
| 757 | |
| 758 | /// Perform a volatile write to the destination |
#[inline(always)]
| 760 | fn volatile_write<T: Copy + Sized>(dst: &mut T, src: T) { |
| 761 | unsafe { ptr::write_volatile(dst, src) } |
| 762 | } |
| 763 | |
| 764 | /// Perform a volatile `memset` operation which fills a slice with a value |
| 765 | /// |
| 766 | /// Safety: |
| 767 | /// The memory pointed to by `dst` must be a single allocated object that is valid for `count` |
| 768 | /// contiguous elements of `T`. |
| 769 | /// `count` must not be larger than an `isize`. |
/// `dst` being offset by `mem::size_of::<T>() * count` bytes must not wrap around the address space.
| 771 | /// Also `dst` must be properly aligned. |
#[inline(always)]
| 773 | unsafe fn volatile_set<T: Copy + Sized>(dst: *mut T, src: T, count: usize) { |
| 774 | // TODO(tarcieri): use `volatile_set_memory` when stabilized |
    for i in 0..count {
| 776 | // Safety: |
| 777 | // |
| 778 | // This is safe because there is room for at least `count` objects of type `T` in the |
| 779 | // allocation pointed to by `dst`, because `count <= isize::MAX` and because |
| 780 | // `dst.add(count)` must not wrap around the address space. |
        let ptr = dst.add(i);
| 782 | |
| 783 | // Safety: |
| 784 | // |
| 785 | // This is safe, because the pointer is valid and because `dst` is well aligned for `T` and |
| 786 | // `ptr` is an offset of `dst` by a multiple of `mem::size_of::<T>()` bytes. |
        ptr::write_volatile(ptr, src);
| 788 | } |
| 789 | } |
| 790 | |
| 791 | /// Zeroizes a flat type/struct. Only zeroizes the values that it owns, and it does not work on |
| 792 | /// dynamically sized values or trait objects. It would be inefficient to use this function on a |
| 793 | /// type that already implements `ZeroizeOnDrop`. |
| 794 | /// |
| 795 | /// # Safety |
| 796 | /// - The type must not contain references to outside data or dynamically sized data, such as |
| 797 | /// `Vec<T>` or `String`. |
| 798 | /// - Values stored in the type must not have `Drop` impls. |
| 799 | /// - This function can invalidate the type if it is used after this function is called on it. |
| 800 | /// It is advisable to call this function only in `impl Drop`. |
| 801 | /// - The bit pattern of all zeroes must be valid for the data being zeroized. This may not be |
| 802 | /// true for enums and pointers. |
| 803 | /// |
| 804 | /// # Incompatible data types |
| 805 | /// Some data types that cannot be safely zeroized using `zeroize_flat_type` include, |
| 806 | /// but are not limited to: |
| 807 | /// - References: `&T` and `&mut T` |
| 808 | /// - Non-nullable types: `NonNull<T>`, `NonZeroU32`, etc. |
| 809 | /// - Enums with explicit non-zero tags. |
| 810 | /// - Smart pointers and collections: `Arc<T>`, `Box<T>`, `Vec<T>`, `HashMap<K, V>`, `String`, etc. |
| 811 | /// |
| 812 | /// # Examples |
| 813 | /// Safe usage for a struct containing strictly flat data: |
| 814 | /// ``` |
| 815 | /// use zeroize::{ZeroizeOnDrop, zeroize_flat_type}; |
| 816 | /// |
| 817 | /// struct DataToZeroize { |
| 818 | /// flat_data_1: [u8; 32], |
| 819 | /// flat_data_2: SomeMoreFlatData, |
| 820 | /// } |
| 821 | /// |
| 822 | /// struct SomeMoreFlatData(u64); |
| 823 | /// |
| 824 | /// impl Drop for DataToZeroize { |
| 825 | /// fn drop(&mut self) { |
| 826 | /// unsafe { zeroize_flat_type(self as *mut Self) } |
| 827 | /// } |
| 828 | /// } |
| 829 | /// impl ZeroizeOnDrop for DataToZeroize {} |
| 830 | /// |
| 831 | /// let mut data = DataToZeroize { |
| 832 | /// flat_data_1: [3u8; 32], |
| 833 | /// flat_data_2: SomeMoreFlatData(123u64) |
| 834 | /// }; |
| 835 | /// |
| 836 | /// // data gets zeroized when dropped |
| 837 | /// ``` |
#[inline(always)]
| 839 | pub unsafe fn zeroize_flat_type<F: Sized>(data: *mut F) { |
| 840 | let size: usize = mem::size_of::<F>(); |
| 841 | // Safety: |
| 842 | // |
    // This is safe because `mem::size_of::<F>()` returns the exact size of the object in memory, and
    // `data` points directly to the first byte of the data.
    volatile_set(data as *mut u8, 0, size);
| 846 | atomic_fence() |
| 847 | } |
| 848 | |
| 849 | /// Internal module used as support for `AssertZeroizeOnDrop`. |
#[doc(hidden)]
| 851 | pub mod __internal { |
| 852 | use super::*; |
| 853 | |
| 854 | /// Auto-deref workaround for deriving `ZeroizeOnDrop`. |
| 855 | pub trait AssertZeroizeOnDrop { |
| 856 | fn zeroize_or_on_drop(self); |
| 857 | } |
| 858 | |
| 859 | impl<T: ZeroizeOnDrop + ?Sized> AssertZeroizeOnDrop for &&mut T { |
| 860 | fn zeroize_or_on_drop(self) {} |
| 861 | } |
| 862 | |
| 863 | /// Auto-deref workaround for deriving `ZeroizeOnDrop`. |
| 864 | pub trait AssertZeroize { |
| 865 | fn zeroize_or_on_drop(&mut self); |
| 866 | } |
| 867 | |
| 868 | impl<T: Zeroize + ?Sized> AssertZeroize for T { |
| 869 | fn zeroize_or_on_drop(&mut self) { |
| 870 | self.zeroize() |
| 871 | } |
| 872 | } |
| 873 | } |
| 874 | |