1 | use crate::cmp::Ordering; |
2 | use crate::fmt; |
3 | use crate::hash; |
4 | use crate::intrinsics; |
5 | use crate::marker::Unsize; |
6 | use crate::mem::{MaybeUninit, SizedTypeProperties}; |
7 | use crate::num::NonZero; |
8 | use crate::ops::{CoerceUnsized, DispatchFromDyn}; |
9 | use crate::ptr; |
10 | use crate::ptr::Unique; |
11 | use crate::slice::{self, SliceIndex}; |
12 | use crate::ub_checks::assert_unsafe_precondition; |
13 | |
14 | /// `*mut T` but non-zero and [covariant]. |
15 | /// |
16 | /// This is often the correct thing to use when building data structures using |
17 | /// raw pointers, but is ultimately more dangerous to use because of its additional |
18 | /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`! |
19 | /// |
20 | /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer |
21 | /// is never dereferenced. This is so that enums may use this forbidden value |
22 | /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`. |
23 | /// However the pointer may still dangle if it isn't dereferenced. |
24 | /// |
25 | /// Unlike `*mut T`, `NonNull<T>` was chosen to be covariant over `T`. This makes it |
26 | /// possible to use `NonNull<T>` when building covariant types, but introduces the |
27 | /// risk of unsoundness if used in a type that shouldn't actually be covariant. |
28 | /// (The opposite choice was made for `*mut T` even though technically the unsoundness |
29 | /// could only be caused by calling unsafe functions.) |
30 | /// |
31 | /// Covariance is correct for most safe abstractions, such as `Box`, `Rc`, `Arc`, `Vec`, |
32 | /// and `LinkedList`. This is the case because they provide a public API that follows the |
33 | /// normal shared XOR mutable rules of Rust. |
34 | /// |
35 | /// If your type cannot safely be covariant, you must ensure it contains some |
36 | /// additional field to provide invariance. Often this field will be a [`PhantomData`] |
37 | /// type like `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`. |
38 | /// |
39 | /// Notice that `NonNull<T>` has a `From` instance for `&T`. However, this does |
40 | /// not change the fact that mutating through a (pointer derived from a) shared |
41 | /// reference is undefined behavior unless the mutation happens inside an |
42 | /// [`UnsafeCell<T>`]. The same goes for creating a mutable reference from a shared |
43 | /// reference. When using this `From` instance without an `UnsafeCell<T>`, |
44 | /// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr` |
45 | /// is never used for mutation. |
46 | /// |
47 | /// # Representation |
48 | /// |
49 | /// Thanks to the [null pointer optimization], |
50 | /// `NonNull<T>` and `Option<NonNull<T>>` |
51 | /// are guaranteed to have the same size and alignment: |
52 | /// |
53 | /// ``` |
54 | /// # use std::mem::{size_of, align_of}; |
55 | /// use std::ptr::NonNull; |
56 | /// |
57 | /// assert_eq!(size_of::<NonNull<i16>>(), size_of::<Option<NonNull<i16>>>()); |
58 | /// assert_eq!(align_of::<NonNull<i16>>(), align_of::<Option<NonNull<i16>>>()); |
59 | /// |
60 | /// assert_eq!(size_of::<NonNull<str>>(), size_of::<Option<NonNull<str>>>()); |
61 | /// assert_eq!(align_of::<NonNull<str>>(), align_of::<Option<NonNull<str>>>()); |
62 | /// ``` |
63 | /// |
64 | /// [covariant]: https://doc.rust-lang.org/reference/subtyping.html |
65 | /// [`PhantomData`]: crate::marker::PhantomData |
66 | /// [`UnsafeCell<T>`]: crate::cell::UnsafeCell |
67 | /// [null pointer optimization]: crate::option#representation |
#[stable(feature = "nonnull", since = "1.25.0")]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
#[rustc_nonnull_optimization_guaranteed]
#[rustc_diagnostic_item = "NonNull"]
73 | pub struct NonNull<T: ?Sized> { |
74 | pointer: *const T, |
75 | } |
76 | |
77 | /// `NonNull` pointers are not `Send` because the data they reference may be aliased. |
78 | // N.B., this impl is unnecessary, but should provide better error messages. |
#[stable(feature = "nonnull", since = "1.25.0")]
80 | impl<T: ?Sized> !Send for NonNull<T> {} |
81 | |
82 | /// `NonNull` pointers are not `Sync` because the data they reference may be aliased. |
83 | // N.B., this impl is unnecessary, but should provide better error messages. |
#[stable(feature = "nonnull", since = "1.25.0")]
85 | impl<T: ?Sized> !Sync for NonNull<T> {} |
86 | |
87 | impl<T: Sized> NonNull<T> { |
88 | /// Creates a new `NonNull` that is dangling, but well-aligned. |
89 | /// |
90 | /// This is useful for initializing types which lazily allocate, like |
91 | /// `Vec::new` does. |
92 | /// |
93 | /// Note that the pointer value may potentially represent a valid pointer to |
94 | /// a `T`, which means this must not be used as a "not yet initialized" |
95 | /// sentinel value. Types that lazily allocate must track initialization by |
96 | /// some other means. |
97 | /// |
98 | /// # Examples |
99 | /// |
100 | /// ``` |
101 | /// use std::ptr::NonNull; |
102 | /// |
103 | /// let ptr = NonNull::<u32>::dangling(); |
104 | /// // Important: don't try to access the value of `ptr` without |
105 | /// // initializing it first! The pointer is not null but isn't valid either! |
106 | /// ``` |
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_dangling", since = "1.36.0")]
#[must_use]
#[inline]
111 | pub const fn dangling() -> Self { |
// SAFETY: mem::align_of() returns a non-zero usize which is then cast
113 | // to a *mut T. Therefore, `ptr` is not null and the conditions for |
114 | // calling new_unchecked() are respected. |
115 | unsafe { |
116 | let ptr = crate::ptr::dangling_mut::<T>(); |
117 | NonNull::new_unchecked(ptr) |
118 | } |
119 | } |
120 | |
/// Returns a shared reference to the value. In contrast to [`as_ref`], this does not require
/// that the value be initialized.
123 | /// |
124 | /// For the mutable counterpart see [`as_uninit_mut`]. |
125 | /// |
126 | /// [`as_ref`]: NonNull::as_ref |
127 | /// [`as_uninit_mut`]: NonNull::as_uninit_mut |
128 | /// |
129 | /// # Safety |
130 | /// |
131 | /// When calling this method, you have to ensure that all of the following is true: |
132 | /// |
133 | /// * The pointer must be properly aligned. |
134 | /// |
135 | /// * It must be "dereferenceable" in the sense defined in [the module documentation]. |
136 | /// |
137 | /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is |
138 | /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. |
139 | /// In particular, while this reference exists, the memory the pointer points to must |
140 | /// not get mutated (except inside `UnsafeCell`). |
141 | /// |
142 | /// This applies even if the result of this method is unused! |
143 | /// |
144 | /// [the module documentation]: crate::ptr#safety |
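///
/// # Examples
///
/// A small sketch of creating the reference before the value is initialized:
///
/// ```
/// #![feature(ptr_as_uninit)]
/// use std::mem::MaybeUninit;
/// use std::ptr::NonNull;
///
/// let mut x = MaybeUninit::<u32>::uninit();
/// let ptr = NonNull::new(x.as_mut_ptr()).unwrap();
///
/// // Unlike `as_ref`, this does not require the value to be initialized.
/// let uninit: &MaybeUninit<u32> = unsafe { ptr.as_uninit_ref() };
/// assert_eq!(uninit.as_ptr(), ptr.as_ptr().cast_const());
/// ```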
#[inline]
#[must_use]
#[unstable(feature = "ptr_as_uninit", issue = "75402")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
149 | pub const unsafe fn as_uninit_ref<'a>(self) -> &'a MaybeUninit<T> { |
150 | // SAFETY: the caller must guarantee that `self` meets all the |
151 | // requirements for a reference. |
152 | unsafe { &*self.cast().as_ptr() } |
153 | } |
154 | |
/// Returns a unique reference to the value. In contrast to [`as_mut`], this does not require
/// that the value be initialized.
157 | /// |
158 | /// For the shared counterpart see [`as_uninit_ref`]. |
159 | /// |
160 | /// [`as_mut`]: NonNull::as_mut |
161 | /// [`as_uninit_ref`]: NonNull::as_uninit_ref |
162 | /// |
163 | /// # Safety |
164 | /// |
165 | /// When calling this method, you have to ensure that all of the following is true: |
166 | /// |
167 | /// * The pointer must be properly aligned. |
168 | /// |
169 | /// * It must be "dereferenceable" in the sense defined in [the module documentation]. |
170 | /// |
171 | /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is |
172 | /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. |
173 | /// In particular, while this reference exists, the memory the pointer points to must |
174 | /// not get accessed (read or written) through any other pointer. |
175 | /// |
176 | /// This applies even if the result of this method is unused! |
177 | /// |
178 | /// [the module documentation]: crate::ptr#safety |
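///
/// # Examples
///
/// A small sketch of initializing a value through the pointer:
///
/// ```
/// #![feature(ptr_as_uninit)]
/// use std::mem::MaybeUninit;
/// use std::ptr::NonNull;
///
/// let mut x = MaybeUninit::<u32>::uninit();
/// let ptr = NonNull::new(x.as_mut_ptr()).unwrap();
///
/// // Initialize the value through the pointer, without requiring it to
/// // already be initialized, and only then read it back.
/// unsafe { ptr.as_uninit_mut() }.write(7);
/// assert_eq!(unsafe { x.assume_init() }, 7);
/// ```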
#[inline]
#[must_use]
#[unstable(feature = "ptr_as_uninit", issue = "75402")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
183 | pub const unsafe fn as_uninit_mut<'a>(self) -> &'a mut MaybeUninit<T> { |
184 | // SAFETY: the caller must guarantee that `self` meets all the |
185 | // requirements for a reference. |
186 | unsafe { &mut *self.cast().as_ptr() } |
187 | } |
188 | } |
189 | |
190 | impl<T: ?Sized> NonNull<T> { |
191 | /// Creates a new `NonNull`. |
192 | /// |
193 | /// # Safety |
194 | /// |
195 | /// `ptr` must be non-null. |
196 | /// |
197 | /// # Examples |
198 | /// |
199 | /// ``` |
200 | /// use std::ptr::NonNull; |
201 | /// |
202 | /// let mut x = 0u32; |
203 | /// let ptr = unsafe { NonNull::new_unchecked(&mut x as *mut _) }; |
204 | /// ``` |
205 | /// |
206 | /// *Incorrect* usage of this function: |
207 | /// |
208 | /// ```rust,no_run |
209 | /// use std::ptr::NonNull; |
210 | /// |
211 | /// // NEVER DO THAT!!! This is undefined behavior. ⚠️ |
212 | /// let ptr = unsafe { NonNull::<u32>::new_unchecked(std::ptr::null_mut()) }; |
213 | /// ``` |
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_new_unchecked", since = "1.25.0")]
#[inline]
217 | pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { |
218 | // SAFETY: the caller must guarantee that `ptr` is non-null. |
219 | unsafe { |
220 | assert_unsafe_precondition!( |
221 | check_language_ub, |
222 | "NonNull::new_unchecked requires that the pointer is non-null" , |
223 | (ptr: *mut () = ptr as *mut ()) => !ptr.is_null() |
224 | ); |
225 | NonNull { pointer: ptr as _ } |
226 | } |
227 | } |
228 | |
229 | /// Creates a new `NonNull` if `ptr` is non-null. |
230 | /// |
231 | /// # Examples |
232 | /// |
233 | /// ``` |
234 | /// use std::ptr::NonNull; |
235 | /// |
236 | /// let mut x = 0u32; |
/// let ptr = NonNull::<u32>::new(&mut x as *mut _).expect("ptr is null!");
238 | /// |
239 | /// if let Some(ptr) = NonNull::<u32>::new(std::ptr::null_mut()) { |
240 | /// unreachable!(); |
241 | /// } |
242 | /// ``` |
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_nonnull_new", issue = "93235")]
#[inline]
246 | pub const fn new(ptr: *mut T) -> Option<Self> { |
247 | if !ptr.is_null() { |
248 | // SAFETY: The pointer is already checked and is not null |
249 | Some(unsafe { Self::new_unchecked(ptr) }) |
250 | } else { |
251 | None |
252 | } |
253 | } |
254 | |
255 | /// Performs the same functionality as [`std::ptr::from_raw_parts`], except that a |
256 | /// `NonNull` pointer is returned, as opposed to a raw `*const` pointer. |
257 | /// |
258 | /// See the documentation of [`std::ptr::from_raw_parts`] for more details. |
259 | /// |
260 | /// [`std::ptr::from_raw_parts`]: crate::ptr::from_raw_parts |
#[unstable(feature = "ptr_metadata", issue = "81513")]
#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
#[inline]
264 | pub const fn from_raw_parts( |
265 | data_pointer: NonNull<()>, |
266 | metadata: <T as super::Pointee>::Metadata, |
267 | ) -> NonNull<T> { |
// SAFETY: The result of `ptr::from_raw_parts_mut` is non-null because `data_pointer` is.
269 | unsafe { |
270 | NonNull::new_unchecked(super::from_raw_parts_mut(data_pointer.as_ptr(), metadata)) |
271 | } |
272 | } |
273 | |
274 | /// Decompose a (possibly wide) pointer into its data pointer and metadata components. |
275 | /// |
276 | /// The pointer can be later reconstructed with [`NonNull::from_raw_parts`]. |
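///
/// # Examples
///
/// A small round-trip sketch through `to_raw_parts` and `from_raw_parts`:
///
/// ```
/// #![feature(ptr_metadata)]
/// use std::ptr::NonNull;
///
/// let mut arr = [1, 2, 3];
/// let slice_ptr: NonNull<[i32]> = NonNull::from(&mut arr[..]);
///
/// // Split the wide pointer into its data pointer and length...
/// let (data, len) = slice_ptr.to_raw_parts();
/// assert_eq!(len, 3);
/// // ...and reassemble it.
/// let rebuilt: NonNull<[i32]> = NonNull::from_raw_parts(data, len);
/// assert_eq!(rebuilt, slice_ptr);
/// ```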
#[unstable(feature = "ptr_metadata", issue = "81513")]
#[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
282 | pub const fn to_raw_parts(self) -> (NonNull<()>, <T as super::Pointee>::Metadata) { |
283 | (self.cast(), super::metadata(self.as_ptr())) |
284 | } |
285 | |
286 | /// Gets the "address" portion of the pointer. |
287 | /// |
288 | /// For more details see the equivalent method on a raw pointer, [`pointer::addr`]. |
289 | /// |
290 | /// This API and its claimed semantics are part of the Strict Provenance experiment, |
291 | /// see the [`ptr` module documentation][crate::ptr]. |
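///
/// # Examples
///
/// A small sketch of the address round-trip:
///
/// ```
/// #![feature(strict_provenance)]
/// use std::ptr::NonNull;
///
/// let mut x = 0u32;
/// let ptr = NonNull::from(&mut x);
///
/// // The address of a `NonNull` is always non-zero...
/// assert_ne!(ptr.addr().get(), 0);
/// // ...and round-tripping it through `with_addr` preserves the pointer.
/// assert_eq!(ptr.with_addr(ptr.addr()), ptr);
/// ```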
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
295 | pub fn addr(self) -> NonZero<usize> { |
296 | // SAFETY: The pointer is guaranteed by the type to be non-null, |
297 | // meaning that the address will be non-zero. |
298 | unsafe { NonZero::new_unchecked(self.pointer.addr()) } |
299 | } |
300 | |
301 | /// Creates a new pointer with the given address. |
302 | /// |
303 | /// For more details see the equivalent method on a raw pointer, [`pointer::with_addr`]. |
304 | /// |
305 | /// This API and its claimed semantics are part of the Strict Provenance experiment, |
306 | /// see the [`ptr` module documentation][crate::ptr]. |
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
310 | pub fn with_addr(self, addr: NonZero<usize>) -> Self { |
// SAFETY: The result of `pointer::with_addr` is non-null because `addr` is guaranteed to be non-zero.
312 | unsafe { NonNull::new_unchecked(self.pointer.with_addr(addr.get()) as *mut _) } |
313 | } |
314 | |
315 | /// Creates a new pointer by mapping `self`'s address to a new one. |
316 | /// |
317 | /// For more details see the equivalent method on a raw pointer, [`pointer::map_addr`]. |
318 | /// |
319 | /// This API and its claimed semantics are part of the Strict Provenance experiment, |
320 | /// see the [`ptr` module documentation][crate::ptr]. |
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
324 | pub fn map_addr(self, f: impl FnOnce(NonZero<usize>) -> NonZero<usize>) -> Self { |
325 | self.with_addr(f(self.addr())) |
326 | } |
327 | |
328 | /// Acquires the underlying `*mut` pointer. |
329 | /// |
330 | /// # Examples |
331 | /// |
332 | /// ``` |
333 | /// use std::ptr::NonNull; |
334 | /// |
335 | /// let mut x = 0u32; |
/// let ptr = NonNull::new(&mut x).expect("ptr is null!");
337 | /// |
338 | /// let x_value = unsafe { *ptr.as_ptr() }; |
339 | /// assert_eq!(x_value, 0); |
340 | /// |
341 | /// unsafe { *ptr.as_ptr() += 2; } |
342 | /// let x_value = unsafe { *ptr.as_ptr() }; |
343 | /// assert_eq!(x_value, 2); |
344 | /// ``` |
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
#[rustc_never_returns_null_ptr]
#[must_use]
#[inline(always)]
350 | pub const fn as_ptr(self) -> *mut T { |
351 | self.pointer as *mut T |
352 | } |
353 | |
354 | /// Returns a shared reference to the value. If the value may be uninitialized, [`as_uninit_ref`] |
355 | /// must be used instead. |
356 | /// |
357 | /// For the mutable counterpart see [`as_mut`]. |
358 | /// |
359 | /// [`as_uninit_ref`]: NonNull::as_uninit_ref |
360 | /// [`as_mut`]: NonNull::as_mut |
361 | /// |
362 | /// # Safety |
363 | /// |
364 | /// When calling this method, you have to ensure that all of the following is true: |
365 | /// |
366 | /// * The pointer must be properly aligned. |
367 | /// |
368 | /// * It must be "dereferenceable" in the sense defined in [the module documentation]. |
369 | /// |
370 | /// * The pointer must point to an initialized instance of `T`. |
371 | /// |
372 | /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is |
373 | /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. |
374 | /// In particular, while this reference exists, the memory the pointer points to must |
375 | /// not get mutated (except inside `UnsafeCell`). |
376 | /// |
377 | /// This applies even if the result of this method is unused! |
/// (The part about being initialized is not yet fully decided, but until
/// it is, the only safe approach is to ensure that the value is indeed initialized.)
380 | /// |
381 | /// # Examples |
382 | /// |
383 | /// ``` |
384 | /// use std::ptr::NonNull; |
385 | /// |
386 | /// let mut x = 0u32; |
/// let ptr = NonNull::new(&mut x as *mut _).expect("ptr is null!");
388 | /// |
389 | /// let ref_x = unsafe { ptr.as_ref() }; |
/// println!("{ref_x}");
391 | /// ``` |
392 | /// |
393 | /// [the module documentation]: crate::ptr#safety |
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_as_ref", since = "1.73.0")]
#[must_use]
#[inline(always)]
398 | pub const unsafe fn as_ref<'a>(&self) -> &'a T { |
399 | // SAFETY: the caller must guarantee that `self` meets all the |
400 | // requirements for a reference. |
401 | // `cast_const` avoids a mutable raw pointer deref. |
402 | unsafe { &*self.as_ptr().cast_const() } |
403 | } |
404 | |
405 | /// Returns a unique reference to the value. If the value may be uninitialized, [`as_uninit_mut`] |
406 | /// must be used instead. |
407 | /// |
408 | /// For the shared counterpart see [`as_ref`]. |
409 | /// |
410 | /// [`as_uninit_mut`]: NonNull::as_uninit_mut |
411 | /// [`as_ref`]: NonNull::as_ref |
412 | /// |
413 | /// # Safety |
414 | /// |
415 | /// When calling this method, you have to ensure that all of the following is true: |
416 | /// |
417 | /// * The pointer must be properly aligned. |
418 | /// |
419 | /// * It must be "dereferenceable" in the sense defined in [the module documentation]. |
420 | /// |
421 | /// * The pointer must point to an initialized instance of `T`. |
422 | /// |
423 | /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is |
424 | /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data. |
425 | /// In particular, while this reference exists, the memory the pointer points to must |
426 | /// not get accessed (read or written) through any other pointer. |
427 | /// |
428 | /// This applies even if the result of this method is unused! |
/// (The part about being initialized is not yet fully decided, but until
/// it is, the only safe approach is to ensure that the value is indeed initialized.)
///
/// # Examples
432 | /// |
433 | /// ``` |
434 | /// use std::ptr::NonNull; |
435 | /// |
436 | /// let mut x = 0u32; |
/// let mut ptr = NonNull::new(&mut x).expect("null pointer");
438 | /// |
439 | /// let x_ref = unsafe { ptr.as_mut() }; |
440 | /// assert_eq!(*x_ref, 0); |
441 | /// *x_ref += 2; |
442 | /// assert_eq!(*x_ref, 2); |
443 | /// ``` |
444 | /// |
445 | /// [the module documentation]: crate::ptr#safety |
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
#[must_use]
#[inline(always)]
450 | pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T { |
451 | // SAFETY: the caller must guarantee that `self` meets all the |
452 | // requirements for a mutable reference. |
453 | unsafe { &mut *self.as_ptr() } |
454 | } |
455 | |
456 | /// Casts to a pointer of another type. |
457 | /// |
458 | /// # Examples |
459 | /// |
460 | /// ``` |
461 | /// use std::ptr::NonNull; |
462 | /// |
463 | /// let mut x = 0u32; |
/// let ptr = NonNull::new(&mut x as *mut _).expect("null pointer");
465 | /// |
466 | /// let casted_ptr = ptr.cast::<i8>(); |
467 | /// let raw_ptr: *mut i8 = casted_ptr.as_ptr(); |
468 | /// ``` |
#[stable(feature = "nonnull_cast", since = "1.27.0")]
#[rustc_const_stable(feature = "const_nonnull_cast", since = "1.36.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
474 | pub const fn cast<U>(self) -> NonNull<U> { |
475 | // SAFETY: `self` is a `NonNull` pointer which is necessarily non-null |
476 | unsafe { NonNull { pointer: self.as_ptr() as *mut U } } |
477 | } |
478 | |
479 | /// Calculates the offset from a pointer. |
480 | /// |
481 | /// `count` is in units of T; e.g., a `count` of 3 represents a pointer |
482 | /// offset of `3 * size_of::<T>()` bytes. |
483 | /// |
484 | /// # Safety |
485 | /// |
486 | /// If any of the following conditions are violated, the result is Undefined |
487 | /// Behavior: |
488 | /// |
489 | /// * Both the starting and resulting pointer must be either in bounds or one |
490 | /// byte past the end of the same [allocated object]. |
491 | /// |
492 | /// * The computed offset, **in bytes**, cannot overflow an `isize`. |
493 | /// |
494 | /// * The offset being in bounds cannot rely on "wrapping around" the address |
/// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
496 | /// |
/// The compiler and standard library generally try to ensure allocations
498 | /// never reach a size where an offset is a concern. For instance, `Vec` |
499 | /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so |
500 | /// `vec.as_ptr().add(vec.len())` is always safe. |
501 | /// |
502 | /// Most platforms fundamentally can't even construct such an allocation. |
503 | /// For instance, no known 64-bit platform can ever serve a request |
504 | /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. |
505 | /// However, some 32-bit and 16-bit platforms may successfully serve a request for |
506 | /// more than `isize::MAX` bytes with things like Physical Address |
507 | /// Extension. As such, memory acquired directly from allocators or memory |
508 | /// mapped files *may* be too large to handle with this function. |
509 | /// |
510 | /// [allocated object]: crate::ptr#allocated-object |
511 | /// |
512 | /// # Examples |
513 | /// |
514 | /// ``` |
515 | /// #![feature(non_null_convenience)] |
516 | /// use std::ptr::NonNull; |
517 | /// |
518 | /// let mut s = [1, 2, 3]; |
519 | /// let ptr: NonNull<u32> = NonNull::new(s.as_mut_ptr()).unwrap(); |
520 | /// |
521 | /// unsafe { |
/// println!("{}", ptr.offset(1).read());
/// println!("{}", ptr.offset(2).read());
524 | /// } |
525 | /// ``` |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
531 | pub const unsafe fn offset(self, count: isize) -> NonNull<T> |
532 | where |
533 | T: Sized, |
534 | { |
535 | // SAFETY: the caller must uphold the safety contract for `offset`. |
// Additionally, the safety contract of `offset` guarantees that the resulting pointer
// points to an allocation; there can't be an allocation at null, thus it's safe to
// construct `NonNull`.
539 | unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } } |
540 | } |
541 | |
542 | /// Calculates the offset from a pointer in bytes. |
543 | /// |
544 | /// `count` is in units of **bytes**. |
545 | /// |
546 | /// This is purely a convenience for casting to a `u8` pointer and |
547 | /// using [offset][pointer::offset] on it. See that method for documentation |
548 | /// and safety requirements. |
549 | /// |
550 | /// For non-`Sized` pointees this operation changes only the data pointer, |
551 | /// leaving the metadata untouched. |
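///
/// # Examples
///
/// A small sketch of stepping through an array one byte offset at a time:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut arr = [1u16, 2, 3];
/// let ptr: NonNull<u16> = NonNull::new(arr.as_mut_ptr()).unwrap();
///
/// unsafe {
///     // Moving by `size_of::<u16>()` bytes is the same as moving by one element.
///     assert_eq!(ptr.byte_offset(2).read(), 2);
/// }
/// ```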
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[must_use]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
557 | pub const unsafe fn byte_offset(self, count: isize) -> Self { |
558 | // SAFETY: the caller must uphold the safety contract for `offset` and `byte_offset` has |
559 | // the same safety contract. |
// Additionally, the safety contract of `offset` guarantees that the resulting pointer
// points to an allocation; there can't be an allocation at null, thus it's safe to
// construct `NonNull`.
563 | unsafe { NonNull { pointer: self.pointer.byte_offset(count) } } |
564 | } |
565 | |
566 | /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`). |
567 | /// |
568 | /// `count` is in units of T; e.g., a `count` of 3 represents a pointer |
569 | /// offset of `3 * size_of::<T>()` bytes. |
570 | /// |
571 | /// # Safety |
572 | /// |
573 | /// If any of the following conditions are violated, the result is Undefined |
574 | /// Behavior: |
575 | /// |
576 | /// * Both the starting and resulting pointer must be either in bounds or one |
577 | /// byte past the end of the same [allocated object]. |
578 | /// |
579 | /// * The computed offset, **in bytes**, cannot overflow an `isize`. |
580 | /// |
581 | /// * The offset being in bounds cannot rely on "wrapping around" the address |
582 | /// space. That is, the infinite-precision sum must fit in a `usize`. |
583 | /// |
/// The compiler and standard library generally try to ensure allocations
585 | /// never reach a size where an offset is a concern. For instance, `Vec` |
586 | /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so |
587 | /// `vec.as_ptr().add(vec.len())` is always safe. |
588 | /// |
589 | /// Most platforms fundamentally can't even construct such an allocation. |
590 | /// For instance, no known 64-bit platform can ever serve a request |
591 | /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. |
592 | /// However, some 32-bit and 16-bit platforms may successfully serve a request for |
593 | /// more than `isize::MAX` bytes with things like Physical Address |
594 | /// Extension. As such, memory acquired directly from allocators or memory |
595 | /// mapped files *may* be too large to handle with this function. |
596 | /// |
597 | /// [allocated object]: crate::ptr#allocated-object |
598 | /// |
599 | /// # Examples |
600 | /// |
601 | /// ``` |
602 | /// #![feature(non_null_convenience)] |
603 | /// use std::ptr::NonNull; |
604 | /// |
/// let s: &str = "123";
606 | /// let ptr: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap(); |
607 | /// |
608 | /// unsafe { |
/// println!("{}", ptr.add(1).read() as char);
/// println!("{}", ptr.add(2).read() as char);
611 | /// } |
612 | /// ``` |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
618 | pub const unsafe fn add(self, count: usize) -> Self |
619 | where |
620 | T: Sized, |
621 | { |
622 | // SAFETY: the caller must uphold the safety contract for `offset`. |
// Additionally, the safety contract of `offset` guarantees that the resulting pointer
// points to an allocation; there can't be an allocation at null, thus it's safe to
// construct `NonNull`.
626 | unsafe { NonNull { pointer: intrinsics::offset(self.pointer, count) } } |
627 | } |
628 | |
629 | /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`). |
630 | /// |
631 | /// `count` is in units of bytes. |
632 | /// |
633 | /// This is purely a convenience for casting to a `u8` pointer and |
634 | /// using [`add`][NonNull::add] on it. See that method for documentation |
635 | /// and safety requirements. |
636 | /// |
637 | /// For non-`Sized` pointees this operation changes only the data pointer, |
638 | /// leaving the metadata untouched. |
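///
/// # Examples
///
/// A small sketch of skipping elements by a byte count:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut arr = [1u16, 2, 3];
/// let ptr: NonNull<u16> = NonNull::new(arr.as_mut_ptr()).unwrap();
///
/// unsafe {
///     // Adding `2 * size_of::<u16>()` bytes skips the first two elements.
///     assert_eq!(ptr.byte_add(4).read(), 3);
/// }
/// ```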
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[must_use]
#[inline(always)]
#[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
645 | pub const unsafe fn byte_add(self, count: usize) -> Self { |
646 | // SAFETY: the caller must uphold the safety contract for `add` and `byte_add` has the same |
647 | // safety contract. |
// Additionally, the safety contract of `add` guarantees that the resulting pointer points
// to an allocation; there can't be an allocation at null, thus it's safe to construct
// `NonNull`.
651 | unsafe { NonNull { pointer: self.pointer.byte_add(count) } } |
652 | } |
653 | |
654 | /// Calculates the offset from a pointer (convenience for |
655 | /// `.offset((count as isize).wrapping_neg())`). |
656 | /// |
657 | /// `count` is in units of T; e.g., a `count` of 3 represents a pointer |
658 | /// offset of `3 * size_of::<T>()` bytes. |
659 | /// |
660 | /// # Safety |
661 | /// |
662 | /// If any of the following conditions are violated, the result is Undefined |
663 | /// Behavior: |
664 | /// |
665 | /// * Both the starting and resulting pointer must be either in bounds or one |
666 | /// byte past the end of the same [allocated object]. |
667 | /// |
668 | /// * The computed offset cannot exceed `isize::MAX` **bytes**. |
669 | /// |
670 | /// * The offset being in bounds cannot rely on "wrapping around" the address |
/// space. That is, the infinite-precision sum must fit in a `usize`.
672 | /// |
/// The compiler and standard library generally try to ensure allocations
674 | /// never reach a size where an offset is a concern. For instance, `Vec` |
675 | /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so |
676 | /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. |
677 | /// |
678 | /// Most platforms fundamentally can't even construct such an allocation. |
679 | /// For instance, no known 64-bit platform can ever serve a request |
680 | /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. |
681 | /// However, some 32-bit and 16-bit platforms may successfully serve a request for |
682 | /// more than `isize::MAX` bytes with things like Physical Address |
683 | /// Extension. As such, memory acquired directly from allocators or memory |
684 | /// mapped files *may* be too large to handle with this function. |
685 | /// |
686 | /// [allocated object]: crate::ptr#allocated-object |
687 | /// |
688 | /// # Examples |
689 | /// |
690 | /// ``` |
691 | /// #![feature(non_null_convenience)] |
692 | /// use std::ptr::NonNull; |
693 | /// |
/// let s: &str = "123";
695 | /// |
696 | /// unsafe { |
697 | /// let end: NonNull<u8> = NonNull::new(s.as_ptr().cast_mut()).unwrap().add(3); |
/// println!("{}", end.sub(1).read() as char);
/// println!("{}", end.sub(2).read() as char);
700 | /// } |
701 | /// ``` |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
707 | pub const unsafe fn sub(self, count: usize) -> Self |
708 | where |
709 | T: Sized, |
710 | { |
711 | if T::IS_ZST { |
712 | // Pointer arithmetic does nothing when the pointee is a ZST. |
713 | self |
714 | } else { |
715 | // SAFETY: the caller must uphold the safety contract for `offset`. |
716 | // Because the pointee is *not* a ZST, that means that `count` is |
717 | // at most `isize::MAX`, and thus the negation cannot overflow. |
718 | unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) } |
719 | } |
720 | } |
721 | |
722 | /// Calculates the offset from a pointer in bytes (convenience for |
723 | /// `.byte_offset((count as isize).wrapping_neg())`). |
724 | /// |
725 | /// `count` is in units of bytes. |
726 | /// |
727 | /// This is purely a convenience for casting to a `u8` pointer and |
728 | /// using [`sub`][NonNull::sub] on it. See that method for documentation |
729 | /// and safety requirements. |
730 | /// |
731 | /// For non-`Sized` pointees this operation changes only the data pointer, |
732 | /// leaving the metadata untouched. |
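///
/// # Examples
///
/// A small sketch of stepping backwards by a byte count:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut arr = [1u16, 2, 3];
/// unsafe {
///     let end: NonNull<u16> = NonNull::new(arr.as_mut_ptr()).unwrap().add(3);
///     // Stepping back `size_of::<u16>()` bytes yields the last element.
///     assert_eq!(end.byte_sub(2).read(), 3);
/// }
/// ```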
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[must_use]
#[inline(always)]
#[rustc_allow_const_fn_unstable(set_ptr_value)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
739 | pub const unsafe fn byte_sub(self, count: usize) -> Self { |
740 | // SAFETY: the caller must uphold the safety contract for `sub` and `byte_sub` has the same |
741 | // safety contract. |
// Additionally, the safety contract of `sub` guarantees that the resulting pointer points
// to an allocation; there can't be an allocation at null, thus it's safe to construct
// `NonNull`.
745 | unsafe { NonNull { pointer: self.pointer.byte_sub(count) } } |
746 | } |
747 | |
748 | /// Calculates the distance between two pointers. The returned value is in |
749 | /// units of T: the distance in bytes divided by `mem::size_of::<T>()`. |
750 | /// |
751 | /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`, |
752 | /// except that it has a lot more opportunities for UB, in exchange for the compiler |
753 | /// better understanding what you are doing. |
754 | /// |
755 | /// The primary motivation of this method is for computing the `len` of an array/slice |
756 | /// of `T` that you are currently representing as a "start" and "end" pointer |
757 | /// (and "end" is "one past the end" of the array). |
758 | /// In that case, `end.offset_from(start)` gets you the length of the array. |
759 | /// |
760 | /// All of the following safety requirements are trivially satisfied for this usecase. |
761 | /// |
762 | /// [`offset`]: #method.offset |
763 | /// |
764 | /// # Safety |
765 | /// |
766 | /// If any of the following conditions are violated, the result is Undefined |
767 | /// Behavior: |
768 | /// |
769 | /// * Both `self` and `origin` must be either in bounds or one |
770 | /// byte past the end of the same [allocated object]. |
771 | /// |
772 | /// * Both pointers must be *derived from* a pointer to the same object. |
773 | /// (See below for an example.) |
774 | /// |
775 | /// * The distance between the pointers, in bytes, must be an exact multiple |
776 | /// of the size of `T`. |
777 | /// |
778 | /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. |
779 | /// |
780 | /// * The distance being in bounds cannot rely on "wrapping around" the address space. |
781 | /// |
782 | /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the |
783 | /// address space, so two pointers within some value of any Rust type `T` will always satisfy |
784 | /// the last two conditions. The standard library also generally ensures that allocations |
785 | /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they |
786 | /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())` |
787 | /// always satisfies the last two conditions. |
788 | /// |
789 | /// Most platforms fundamentally can't even construct such a large allocation. |
790 | /// For instance, no known 64-bit platform can ever serve a request |
791 | /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. |
792 | /// However, some 32-bit and 16-bit platforms may successfully serve a request for |
793 | /// more than `isize::MAX` bytes with things like Physical Address |
794 | /// Extension. As such, memory acquired directly from allocators or memory |
795 | /// mapped files *may* be too large to handle with this function. |
796 | /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on |
797 | /// such large allocations either.) |
798 | /// |
799 | /// The requirement for pointers to be derived from the same allocated object is primarily |
800 | /// needed for `const`-compatibility: the distance between pointers into *different* allocated |
801 | /// objects is not known at compile-time. However, the requirement also exists at |
802 | /// runtime and may be exploited by optimizations. If you wish to compute the difference between |
803 | /// pointers that are not guaranteed to be from the same allocation, use `(self as isize - |
804 | /// origin as isize) / mem::size_of::<T>()`. |
805 | // FIXME: recommend `addr()` instead of `as usize` once that is stable. |
806 | /// |
807 | /// [`add`]: #method.add |
808 | /// [allocated object]: crate::ptr#allocated-object |
809 | /// |
810 | /// # Panics |
811 | /// |
812 | /// This function panics if `T` is a Zero-Sized Type ("ZST"). |
813 | /// |
814 | /// # Examples |
815 | /// |
816 | /// Basic usage: |
817 | /// |
818 | /// ``` |
819 | /// #![feature(non_null_convenience)] |
820 | /// use std::ptr::NonNull; |
821 | /// |
822 | /// let a = [0; 5]; |
823 | /// let ptr1: NonNull<u32> = NonNull::from(&a[1]); |
824 | /// let ptr2: NonNull<u32> = NonNull::from(&a[3]); |
825 | /// unsafe { |
826 | /// assert_eq!(ptr2.offset_from(ptr1), 2); |
827 | /// assert_eq!(ptr1.offset_from(ptr2), -2); |
828 | /// assert_eq!(ptr1.offset(2), ptr2); |
829 | /// assert_eq!(ptr2.offset(-2), ptr1); |
830 | /// } |
831 | /// ``` |
832 | /// |
833 | /// *Incorrect* usage: |
834 | /// |
835 | /// ```rust,no_run |
836 | /// #![feature(non_null_convenience, strict_provenance)] |
837 | /// use std::ptr::NonNull; |
838 | /// |
839 | /// let ptr1 = NonNull::new(Box::into_raw(Box::new(0u8))).unwrap(); |
840 | /// let ptr2 = NonNull::new(Box::into_raw(Box::new(1u8))).unwrap(); |
841 | /// let diff = (ptr2.addr().get() as isize).wrapping_sub(ptr1.addr().get() as isize); |
842 | /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1. |
843 | /// let ptr2_other = NonNull::new(ptr1.as_ptr().wrapping_byte_offset(diff)).unwrap(); |
844 | /// assert_eq!(ptr2.addr(), ptr2_other.addr()); |
845 | /// // Since ptr2_other and ptr2 are derived from pointers to different objects, |
846 | /// // computing their offset is undefined behavior, even though |
847 | /// // they point to the same address! |
848 | /// unsafe { |
849 | /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior |
850 | /// } |
851 | /// ``` |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
856 | pub const unsafe fn offset_from(self, origin: NonNull<T>) -> isize |
857 | where |
858 | T: Sized, |
859 | { |
860 | // SAFETY: the caller must uphold the safety contract for `offset_from`. |
861 | unsafe { self.pointer.offset_from(origin.pointer) } |
862 | } |
863 | |
864 | /// Calculates the distance between two pointers. The returned value is in |
865 | /// units of **bytes**. |
866 | /// |
867 | /// This is purely a convenience for casting to a `u8` pointer and |
868 | /// using [`offset_from`][NonNull::offset_from] on it. See that method for |
869 | /// documentation and safety requirements. |
870 | /// |
871 | /// For non-`Sized` pointees this operation considers only the data pointers, |
872 | /// ignoring the metadata. |
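///
/// # Examples
///
/// A small sketch of measuring the byte distance between two elements:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let a = [0u16; 5];
/// let ptr1: NonNull<u16> = NonNull::from(&a[1]);
/// let ptr2: NonNull<u16> = NonNull::from(&a[3]);
/// unsafe {
///     // Two elements apart is `2 * size_of::<u16>()` bytes apart.
///     assert_eq!(ptr2.byte_offset_from(ptr1), 4);
///     assert_eq!(ptr1.byte_offset_from(ptr2), -4);
/// }
/// ```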
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
877 | pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: NonNull<U>) -> isize { |
878 | // SAFETY: the caller must uphold the safety contract for `byte_offset_from`. |
879 | unsafe { self.pointer.byte_offset_from(origin.pointer) } |
880 | } |
881 | |
// N.B. `wrapping_offset`, `wrapping_add`, etc. are not implemented because they can wrap to null
883 | |
884 | /// Calculates the distance between two pointers, *where it's known that |
885 | /// `self` is equal to or greater than `origin`*. The returned value is in |
886 | /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. |
887 | /// |
888 | /// This computes the same value that [`offset_from`](#method.offset_from) |
889 | /// would compute, but with the added precondition that the offset is |
890 | /// guaranteed to be non-negative. This method is equivalent to |
891 | /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`, |
892 | /// but it provides slightly more information to the optimizer, which can |
893 | /// sometimes allow it to optimize slightly better with some backends. |
894 | /// |
/// This method can be thought of as recovering the `count` that was passed
896 | /// to [`add`](#method.add) (or, with the parameters in the other order, |
897 | /// to [`sub`](#method.sub)). The following are all equivalent, assuming |
898 | /// that their safety preconditions are met: |
899 | /// ```rust |
/// # #![feature(non_null_convenience)]
901 | /// # unsafe fn blah(ptr: std::ptr::NonNull<u32>, origin: std::ptr::NonNull<u32>, count: usize) -> bool { |
902 | /// ptr.sub_ptr(origin) == count |
903 | /// # && |
904 | /// origin.add(count) == ptr |
905 | /// # && |
906 | /// ptr.sub(count) == origin |
907 | /// # } |
908 | /// ``` |
909 | /// |
910 | /// # Safety |
911 | /// |
912 | /// - The distance between the pointers must be non-negative (`self >= origin`) |
913 | /// |
914 | /// - *All* the safety conditions of [`offset_from`](#method.offset_from) |
915 | /// apply to this method as well; see it for the full details. |
916 | /// |
917 | /// Importantly, despite the return type of this method being able to represent |
918 | /// a larger offset, it's still *not permitted* to pass pointers which differ |
919 | /// by more than `isize::MAX` *bytes*. As such, the result of this method will |
920 | /// always be less than or equal to `isize::MAX as usize`. |
921 | /// |
922 | /// # Panics |
923 | /// |
924 | /// This function panics if `T` is a Zero-Sized Type ("ZST"). |
925 | /// |
926 | /// # Examples |
927 | /// |
928 | /// ``` |
929 | /// #![feature(non_null_convenience)] |
930 | /// use std::ptr::NonNull; |
931 | /// |
932 | /// let a = [0; 5]; |
933 | /// let ptr1: NonNull<u32> = NonNull::from(&a[1]); |
934 | /// let ptr2: NonNull<u32> = NonNull::from(&a[3]); |
935 | /// unsafe { |
936 | /// assert_eq!(ptr2.sub_ptr(ptr1), 2); |
937 | /// assert_eq!(ptr1.add(2), ptr2); |
938 | /// assert_eq!(ptr2.sub(2), ptr1); |
939 | /// assert_eq!(ptr2.sub_ptr(ptr2), 0); |
940 | /// } |
941 | /// |
942 | /// // This would be incorrect, as the pointers are not correctly ordered: |
943 | /// // ptr1.sub_ptr(ptr2) |
944 | /// ``` |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
// #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
// #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
951 | pub const unsafe fn sub_ptr(self, subtracted: NonNull<T>) -> usize |
952 | where |
953 | T: Sized, |
954 | { |
955 | // SAFETY: the caller must uphold the safety contract for `sub_ptr`. |
956 | unsafe { self.pointer.sub_ptr(subtracted.pointer) } |
957 | } |
958 | |
959 | /// Reads the value from `self` without moving it. This leaves the |
960 | /// memory in `self` unchanged. |
961 | /// |
962 | /// See [`ptr::read`] for safety concerns and examples. |
963 | /// |
964 | /// [`ptr::read`]: crate::ptr::read() |
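///
/// # Examples
///
/// A small sketch of reading a copy of the value without moving it:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let x = 12u32;
/// let ptr = NonNull::from(&x);
///
/// // Read a copy of the value; `x` itself is left untouched.
/// assert_eq!(unsafe { ptr.read() }, 12);
/// ```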
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
969 | pub const unsafe fn read(self) -> T |
970 | where |
971 | T: Sized, |
972 | { |
973 | // SAFETY: the caller must uphold the safety contract for `read`. |
974 | unsafe { ptr::read(self.pointer) } |
975 | } |
976 | |
977 | /// Performs a volatile read of the value from `self` without moving it. This |
978 | /// leaves the memory in `self` unchanged. |
979 | /// |
980 | /// Volatile operations are intended to act on I/O memory, and are guaranteed |
981 | /// to not be elided or reordered by the compiler across other volatile |
982 | /// operations. |
983 | /// |
984 | /// See [`ptr::read_volatile`] for safety concerns and examples. |
985 | /// |
986 | /// [`ptr::read_volatile`]: crate::ptr::read_volatile() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
990 | pub unsafe fn read_volatile(self) -> T |
991 | where |
992 | T: Sized, |
993 | { |
994 | // SAFETY: the caller must uphold the safety contract for `read_volatile`. |
995 | unsafe { ptr::read_volatile(self.pointer) } |
996 | } |
997 | |
998 | /// Reads the value from `self` without moving it. This leaves the |
999 | /// memory in `self` unchanged. |
1000 | /// |
1001 | /// Unlike `read`, the pointer may be unaligned. |
1002 | /// |
1003 | /// See [`ptr::read_unaligned`] for safety concerns and examples. |
1004 | /// |
1005 | /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1010 | pub const unsafe fn read_unaligned(self) -> T |
1011 | where |
1012 | T: Sized, |
1013 | { |
1014 | // SAFETY: the caller must uphold the safety contract for `read_unaligned`. |
1015 | unsafe { ptr::read_unaligned(self.pointer) } |
1016 | } |
1017 | |
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1019 | /// and destination may overlap. |
1020 | /// |
1021 | /// NOTE: this has the *same* argument order as [`ptr::copy`]. |
1022 | /// |
1023 | /// See [`ptr::copy`] for safety concerns and examples. |
1024 | /// |
1025 | /// [`ptr::copy`]: crate::ptr::copy() |
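///
/// # Examples
///
/// A small sketch of copying between two buffers:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut src = [1u8, 2, 3];
/// let mut dst = [0u8; 3];
/// let src_ptr = NonNull::new(src.as_mut_ptr()).unwrap();
/// let dst_ptr = NonNull::new(dst.as_mut_ptr()).unwrap();
///
/// // Copy three elements from `src` into `dst`.
/// unsafe { src_ptr.copy_to(dst_ptr, 3) };
/// assert_eq!(dst, [1, 2, 3]);
/// ```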
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1030 | pub const unsafe fn copy_to(self, dest: NonNull<T>, count: usize) |
1031 | where |
1032 | T: Sized, |
1033 | { |
1034 | // SAFETY: the caller must uphold the safety contract for `copy`. |
1035 | unsafe { ptr::copy(self.pointer, dest.as_ptr(), count) } |
1036 | } |
1037 | |
/// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
1039 | /// and destination may *not* overlap. |
1040 | /// |
1041 | /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. |
1042 | /// |
1043 | /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. |
1044 | /// |
1045 | /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1050 | pub const unsafe fn copy_to_nonoverlapping(self, dest: NonNull<T>, count: usize) |
1051 | where |
1052 | T: Sized, |
1053 | { |
1054 | // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`. |
1055 | unsafe { ptr::copy_nonoverlapping(self.pointer, dest.as_ptr(), count) } |
1056 | } |
1057 | |
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
1059 | /// and destination may overlap. |
1060 | /// |
1061 | /// NOTE: this has the *opposite* argument order of [`ptr::copy`]. |
1062 | /// |
1063 | /// See [`ptr::copy`] for safety concerns and examples. |
1064 | /// |
1065 | /// [`ptr::copy`]: crate::ptr::copy() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1070 | pub const unsafe fn copy_from(self, src: NonNull<T>, count: usize) |
1071 | where |
1072 | T: Sized, |
1073 | { |
1074 | // SAFETY: the caller must uphold the safety contract for `copy`. |
1075 | unsafe { ptr::copy(src.pointer, self.as_ptr(), count) } |
1076 | } |
1077 | |
/// Copies `count * size_of::<T>()` bytes from `src` to `self`. The source
1079 | /// and destination may *not* overlap. |
1080 | /// |
1081 | /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`]. |
1082 | /// |
1083 | /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. |
1084 | /// |
1085 | /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1090 | pub const unsafe fn copy_from_nonoverlapping(self, src: NonNull<T>, count: usize) |
1091 | where |
1092 | T: Sized, |
1093 | { |
1094 | // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`. |
1095 | unsafe { ptr::copy_nonoverlapping(src.pointer, self.as_ptr(), count) } |
1096 | } |
1097 | |
1098 | /// Executes the destructor (if any) of the pointed-to value. |
1099 | /// |
1100 | /// See [`ptr::drop_in_place`] for safety concerns and examples. |
1101 | /// |
1102 | /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
1105 | pub unsafe fn drop_in_place(self) { |
1106 | // SAFETY: the caller must uphold the safety contract for `drop_in_place`. |
1107 | unsafe { ptr::drop_in_place(self.as_ptr()) } |
1108 | } |
1109 | |
1110 | /// Overwrites a memory location with the given value without reading or |
1111 | /// dropping the old value. |
1112 | /// |
1113 | /// See [`ptr::write`] for safety concerns and examples. |
1114 | /// |
1115 | /// [`ptr::write`]: crate::ptr::write() |
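///
/// # Examples
///
/// A small sketch of overwriting a value in place:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut x = 0u32;
/// let ptr = NonNull::from(&mut x);
///
/// // Overwrite the value without reading or dropping the old one.
/// unsafe { ptr.write(42) };
/// assert_eq!(x, 42);
/// ```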
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
//#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1121 | pub const unsafe fn write(self, val: T) |
1122 | where |
1123 | T: Sized, |
1124 | { |
1125 | // SAFETY: the caller must uphold the safety contract for `write`. |
1126 | unsafe { ptr::write(self.as_ptr(), val) } |
1127 | } |
1128 | |
1129 | /// Invokes memset on the specified pointer, setting `count * size_of::<T>()` |
1130 | /// bytes of memory starting at `self` to `val`. |
1131 | /// |
1132 | /// See [`ptr::write_bytes`] for safety concerns and examples. |
1133 | /// |
1134 | /// [`ptr::write_bytes`]: crate::ptr::write_bytes() |
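///
/// # Examples
///
/// A small sketch of zeroing a buffer through the pointer:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut buf = [1u8; 4];
/// let len = buf.len();
/// let ptr = NonNull::new(buf.as_mut_ptr()).unwrap();
///
/// // Zero the whole buffer.
/// unsafe { ptr.write_bytes(0, len) };
/// assert_eq!(buf, [0u8; 4]);
/// ```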
#[doc(alias = "memset")]
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
//#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1141 | pub const unsafe fn write_bytes(self, val: u8, count: usize) |
1142 | where |
1143 | T: Sized, |
1144 | { |
1145 | // SAFETY: the caller must uphold the safety contract for `write_bytes`. |
1146 | unsafe { ptr::write_bytes(self.as_ptr(), val, count) } |
1147 | } |
1148 | |
1149 | /// Performs a volatile write of a memory location with the given value without |
1150 | /// reading or dropping the old value. |
1151 | /// |
1152 | /// Volatile operations are intended to act on I/O memory, and are guaranteed |
1153 | /// to not be elided or reordered by the compiler across other volatile |
1154 | /// operations. |
1155 | /// |
1156 | /// See [`ptr::write_volatile`] for safety concerns and examples. |
1157 | /// |
1158 | /// [`ptr::write_volatile`]: crate::ptr::write_volatile() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1162 | pub unsafe fn write_volatile(self, val: T) |
1163 | where |
1164 | T: Sized, |
1165 | { |
1166 | // SAFETY: the caller must uphold the safety contract for `write_volatile`. |
1167 | unsafe { ptr::write_volatile(self.as_ptr(), val) } |
1168 | } |
1169 | |
1170 | /// Overwrites a memory location with the given value without reading or |
1171 | /// dropping the old value. |
1172 | /// |
1173 | /// Unlike `write`, the pointer may be unaligned. |
1174 | /// |
1175 | /// See [`ptr::write_unaligned`] for safety concerns and examples. |
1176 | /// |
1177 | /// [`ptr::write_unaligned`]: crate::ptr::write_unaligned() |
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
//#[rustc_const_unstable(feature = "const_ptr_write", issue = "86302")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1183 | pub const unsafe fn write_unaligned(self, val: T) |
1184 | where |
1185 | T: Sized, |
1186 | { |
1187 | // SAFETY: the caller must uphold the safety contract for `write_unaligned`. |
1188 | unsafe { ptr::write_unaligned(self.as_ptr(), val) } |
1189 | } |
1190 | |
1191 | /// Replaces the value at `self` with `src`, returning the old |
1192 | /// value, without dropping either. |
1193 | /// |
1194 | /// See [`ptr::replace`] for safety concerns and examples. |
1195 | /// |
1196 | /// [`ptr::replace`]: crate::ptr::replace() |
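///
/// # Examples
///
/// A small sketch of swapping in a new value and recovering the old one:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut x = 1u32;
/// let ptr = NonNull::from(&mut x);
///
/// // Replace the value and get the old one back.
/// let old = unsafe { ptr.replace(2) };
/// assert_eq!(old, 1);
/// assert_eq!(x, 2);
/// ```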
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[inline(always)]
1199 | pub unsafe fn replace(self, src: T) -> T |
1200 | where |
1201 | T: Sized, |
1202 | { |
1203 | // SAFETY: the caller must uphold the safety contract for `replace`. |
1204 | unsafe { ptr::replace(self.as_ptr(), src) } |
1205 | } |
1206 | |
1207 | /// Swaps the values at two mutable locations of the same type, without |
1208 | /// deinitializing either. They may overlap, unlike `mem::swap` which is |
1209 | /// otherwise equivalent. |
1210 | /// |
1211 | /// See [`ptr::swap`] for safety concerns and examples. |
1212 | /// |
1213 | /// [`ptr::swap`]: crate::ptr::swap() |
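///
/// # Examples
///
/// A small sketch of swapping two values through their pointers:
///
/// ```
/// #![feature(non_null_convenience)]
/// use std::ptr::NonNull;
///
/// let mut a = 1u32;
/// let mut b = 2u32;
/// let pa = NonNull::from(&mut a);
/// let pb = NonNull::from(&mut b);
///
/// // Swap the two values in place.
/// unsafe { pa.swap(pb) };
/// assert_eq!(a, 2);
/// assert_eq!(b, 1);
/// ```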
#[unstable(feature = "non_null_convenience", issue = "117691")]
#[rustc_const_unstable(feature = "non_null_convenience", issue = "117691")]
//#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
#[inline(always)]
1218 | pub const unsafe fn swap(self, with: NonNull<T>) |
1219 | where |
1220 | T: Sized, |
1221 | { |
1222 | // SAFETY: the caller must uphold the safety contract for `swap`. |
1223 | unsafe { ptr::swap(self.as_ptr(), with.as_ptr()) } |
1224 | } |
1225 | |
1226 | /// Computes the offset that needs to be applied to the pointer in order to make it aligned to |
1227 | /// `align`. |
1228 | /// |
1229 | /// If it is not possible to align the pointer, the implementation returns |
1230 | /// `usize::MAX`. It is permissible for the implementation to *always* |
1231 | /// return `usize::MAX`. Only your algorithm's performance can depend |
1232 | /// on getting a usable offset here, not its correctness. |
1233 | /// |
1234 | /// The offset is expressed in number of `T` elements, and not bytes. |
1235 | /// |
1236 | /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go |
1237 | /// beyond the allocation that the pointer points into. It is up to the caller to ensure that |
1238 | /// the returned offset is correct in all terms other than alignment. |
1239 | /// |
1240 | /// # Panics |
1241 | /// |
1242 | /// The function panics if `align` is not a power-of-two. |
1243 | /// |
1244 | /// # Examples |
1245 | /// |
1246 | /// Accessing adjacent `u8` as `u16` |
1247 | /// |
1248 | /// ``` |
1249 | /// #![feature(non_null_convenience)] |
1250 | /// use std::mem::align_of; |
1251 | /// use std::ptr::NonNull; |
1252 | /// |
1253 | /// # unsafe { |
1254 | /// let x = [5_u8, 6, 7, 8, 9]; |
1255 | /// let ptr = NonNull::new(x.as_ptr() as *mut u8).unwrap(); |
1256 | /// let offset = ptr.align_offset(align_of::<u16>()); |
1257 | /// |
1258 | /// if offset < x.len() - 1 { |
1259 | /// let u16_ptr = ptr.add(offset).cast::<u16>(); |
1260 | /// assert!(u16_ptr.read() == u16::from_ne_bytes([5, 6]) || u16_ptr.read() == u16::from_ne_bytes([6, 7])); |
1261 | /// } else { |
1262 | /// // while the pointer can be aligned via `offset`, it would point |
1263 | /// // outside the allocation |
1264 | /// } |
1265 | /// # } |
1266 | /// ``` |
1267 | #[unstable (feature = "non_null_convenience" , issue = "117691" )] |
1268 | #[rustc_const_unstable (feature = "non_null_convenience" , issue = "117691" )] |
1269 | //#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")] |
1270 | #[must_use ] |
1271 | #[inline ] |
1272 | pub const fn align_offset(self, align: usize) -> usize |
1273 | where |
1274 | T: Sized, |
1275 | { |
1276 | if !align.is_power_of_two() { |
1277 | panic!("align_offset: align is not a power-of-two" ); |
1278 | } |
1279 | |
1280 | { |
1281 | // SAFETY: `align` has been checked to be a power of 2 above. |
1282 | unsafe { ptr::align_offset(self.pointer, align) } |
1283 | } |
1284 | } |

    /// Returns whether the pointer is properly aligned for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr::NonNull;
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// let data = AlignedI32(42);
    /// let ptr = NonNull::<AlignedI32>::from(&data);
    ///
    /// assert!(ptr.is_aligned());
    /// assert!(!NonNull::new(ptr.as_ptr().wrapping_byte_add(1)).unwrap().is_aligned());
    /// ```
    ///
    /// # At compiletime
    /// **Note: Alignment at compiletime is experimental and subject to change. See the
    /// [tracking issue] for details.**
    ///
    /// At compiletime, the compiler may not know where a value will end up in memory.
    /// Calling this function on a pointer created from a reference at compiletime will only
    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
    /// is never aligned if cast to a type with a stricter alignment than the reference's
    /// underlying allocation.
    ///
    /// ```
    /// #![feature(const_pointer_is_aligned)]
    /// #![feature(non_null_convenience)]
    /// #![feature(const_option)]
    /// #![feature(const_nonnull_new)]
    /// use std::ptr::NonNull;
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// const _: () = {
    ///     let data = [AlignedI32(42), AlignedI32(42)];
    ///     let ptr = NonNull::<AlignedI32>::new(&data[0] as *const _ as *mut _).unwrap();
    ///     assert!(ptr.is_aligned());
    ///
    ///     // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
    ///     let ptr1 = ptr.cast::<AlignedI64>();
    ///     let ptr2 = unsafe { ptr.add(1).cast::<AlignedI64>() };
    ///     assert!(!ptr1.is_aligned());
    ///     assert!(!ptr2.is_aligned());
    /// };
    /// ```
    ///
    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
    ///
    /// ```
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
    /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
    /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
    ///
    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
    /// let runtime_ptr = COMPTIME_PTR;
    /// assert_ne!(
    ///     runtime_ptr.cast::<AlignedI64>().is_aligned(),
    ///     runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
    /// );
    /// ```
    ///
    /// If a pointer is created from a fixed address, this function behaves the same during
    /// runtime and compiletime.
    ///
    /// ```
    /// #![feature(const_pointer_is_aligned)]
    /// #![feature(const_option)]
    /// #![feature(const_nonnull_new)]
    /// use std::ptr::NonNull;
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// const _: () = {
    ///     let ptr = NonNull::new(40 as *mut AlignedI32).unwrap();
    ///     assert!(ptr.is_aligned());
    ///
    ///     // For pointers with a known address, runtime and compiletime behavior are identical.
    ///     let ptr1 = ptr.cast::<AlignedI64>();
    ///     let ptr2 = NonNull::new(ptr.as_ptr().wrapping_add(1)).unwrap().cast::<AlignedI64>();
    ///     assert!(ptr1.is_aligned());
    ///     assert!(!ptr2.is_aligned());
    /// };
    /// ```
    ///
    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
    #[stable(feature = "pointer_is_aligned", since = "CURRENT_RUSTC_VERSION")]
    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
    #[must_use]
    #[inline]
    pub const fn is_aligned(self) -> bool
    where
        T: Sized,
    {
        self.pointer.is_aligned()
    }

    /// Returns whether the pointer is aligned to `align`.
    ///
    /// For non-`Sized` pointees this operation considers only the data pointer,
    /// ignoring the metadata.
    ///
    /// # Panics
    ///
    /// The function panics if `align` is not a power-of-two (this includes 0).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// let data = AlignedI32(42);
    /// let ptr = &data as *const AlignedI32;
    ///
    /// assert!(ptr.is_aligned_to(1));
    /// assert!(ptr.is_aligned_to(2));
    /// assert!(ptr.is_aligned_to(4));
    ///
    /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
    /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
    ///
    /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
    /// ```
    ///
    /// # At compiletime
    /// **Note: Alignment at compiletime is experimental and subject to change. See the
    /// [tracking issue] for details.**
    ///
    /// At compiletime, the compiler may not know where a value will end up in memory.
    /// Calling this function on a pointer created from a reference at compiletime will only
    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
    /// cannot be more strictly aligned than the reference's underlying allocation.
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// const _: () = {
    ///     let data = AlignedI32(42);
    ///     let ptr = &data as *const AlignedI32;
    ///
    ///     assert!(ptr.is_aligned_to(1));
    ///     assert!(ptr.is_aligned_to(2));
    ///     assert!(ptr.is_aligned_to(4));
    ///
    ///     // At compiletime, we know for sure that the pointer isn't aligned to 8.
    ///     assert!(!ptr.is_aligned_to(8));
    ///     assert!(!ptr.wrapping_add(1).is_aligned_to(8));
    /// };
    /// ```
    ///
    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
    /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
    /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
    ///
    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
    /// let runtime_ptr = COMPTIME_PTR;
    /// assert_ne!(
    ///     runtime_ptr.is_aligned_to(8),
    ///     runtime_ptr.wrapping_add(1).is_aligned_to(8),
    /// );
    /// ```
    ///
    /// If a pointer is created from a fixed address, this function behaves the same during
    /// runtime and compiletime.
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// const _: () = {
    ///     let ptr = 40 as *const u8;
    ///     assert!(ptr.is_aligned_to(1));
    ///     assert!(ptr.is_aligned_to(2));
    ///     assert!(ptr.is_aligned_to(4));
    ///     assert!(ptr.is_aligned_to(8));
    ///     assert!(!ptr.is_aligned_to(16));
    /// };
    /// ```
    ///
    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
    #[unstable(feature = "pointer_is_aligned_to", issue = "96284")]
    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
    #[must_use]
    #[inline]
    pub const fn is_aligned_to(self, align: usize) -> bool {
        self.pointer.is_aligned_to(align)
    }
}

impl<T> NonNull<[T]> {
    /// Creates a non-null raw slice from a thin pointer and a length.
    ///
    /// The `len` argument is the number of **elements**, not the number of bytes.
    ///
    /// This function is safe, but dereferencing the return value is unsafe.
    /// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::ptr::NonNull;
    ///
    /// // create a slice pointer when starting out with a pointer to the first element
    /// let mut x = [5, 6, 7];
    /// let nonnull_pointer = NonNull::new(x.as_mut_ptr()).unwrap();
    /// let slice = NonNull::slice_from_raw_parts(nonnull_pointer, 3);
    /// assert_eq!(unsafe { slice.as_ref()[2] }, 7);
    /// ```
    ///
    /// (Note that this example artificially demonstrates a use of this method,
    /// but `let slice = NonNull::from(&x[..]);` would be a better way to write code like this.)
    #[stable(feature = "nonnull_slice_from_raw_parts", since = "1.70.0")]
    #[rustc_const_unstable(feature = "const_slice_from_raw_parts_mut", issue = "67456")]
    #[must_use]
    #[inline]
    pub const fn slice_from_raw_parts(data: NonNull<T>, len: usize) -> Self {
        // SAFETY: `data` is a `NonNull` pointer which is necessarily non-null
        unsafe { Self::new_unchecked(super::slice_from_raw_parts_mut(data.as_ptr(), len)) }
    }

    /// Returns the length of a non-null raw slice.
    ///
    /// The returned value is the number of **elements**, not the number of bytes.
    ///
    /// This function is safe, even when the non-null raw slice cannot be dereferenced to a slice
    /// because the pointer does not have a valid address.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::ptr::NonNull;
    ///
    /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
    /// assert_eq!(slice.len(), 3);
    /// ```
    #[stable(feature = "slice_ptr_len_nonnull", since = "1.63.0")]
    #[rustc_const_stable(feature = "const_slice_ptr_len_nonnull", since = "1.63.0")]
    #[must_use]
    #[inline]
    pub const fn len(self) -> usize {
        self.as_ptr().len()
    }

    /// Returns `true` if the non-null raw slice has a length of 0.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::ptr::NonNull;
    ///
    /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
    /// assert!(!slice.is_empty());
    /// ```
    #[stable(feature = "slice_ptr_is_empty_nonnull", since = "CURRENT_RUSTC_VERSION")]
    #[rustc_const_stable(
        feature = "const_slice_ptr_is_empty_nonnull",
        since = "CURRENT_RUSTC_VERSION"
    )]
    #[must_use]
    #[inline]
    pub const fn is_empty(self) -> bool {
        self.len() == 0
    }

    /// Returns a non-null pointer to the slice's buffer.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(slice_ptr_get)]
    /// use std::ptr::NonNull;
    ///
    /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
    /// assert_eq!(slice.as_non_null_ptr(), NonNull::<i8>::dangling());
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
    pub const fn as_non_null_ptr(self) -> NonNull<T> {
        self.cast()
    }

    /// Returns a raw pointer to the slice's buffer.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(slice_ptr_get)]
    /// use std::ptr::NonNull;
    ///
    /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
    /// assert_eq!(slice.as_mut_ptr(), NonNull::<i8>::dangling().as_ptr());
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_never_returns_null_ptr]
    pub const fn as_mut_ptr(self) -> *mut T {
        self.as_non_null_ptr().as_ptr()
    }

    /// Returns a shared reference to a slice of possibly uninitialized values. In contrast to
    /// [`as_ref`], this does not require the value to be initialized.
    ///
    /// For the mutable counterpart see [`as_uninit_slice_mut`].
    ///
    /// [`as_ref`]: NonNull::as_ref
    /// [`as_uninit_slice_mut`]: NonNull::as_uninit_slice_mut
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that all of the following is true:
    ///
    /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
    ///   and it must be properly aligned. This means in particular:
    ///
    ///     * The entire memory range of this slice must be contained within a single allocated object!
    ///       Slices can never span across multiple allocated objects.
    ///
    ///     * The pointer must be aligned even for zero-length slices. One
    ///       reason for this is that enum layout optimizations may rely on references
    ///       (including slices of any length) being aligned and non-null to distinguish
    ///       them from other data. You can obtain a pointer that is usable as `data`
    ///       for zero-length slices using [`NonNull::dangling()`].
    ///
    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// See also [`slice::from_raw_parts`].
    ///
    /// [valid]: crate::ptr#safety
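    ///
    /// # Examples
    ///
    /// A sketch mirroring the example on [`as_uninit_slice_mut`], assuming the
    /// unstable `allocator_api` machinery to obtain an uninitialized allocation:
    ///
    /// ```rust
    /// #![feature(allocator_api, ptr_as_uninit)]
    ///
    /// use std::alloc::{Allocator, Layout, Global};
    /// use std::mem::MaybeUninit;
    /// use std::ptr::NonNull;
    ///
    /// let memory: NonNull<[u8]> = Global.allocate(Layout::new::<[u8; 32]>())?;
    /// // SAFETY: `memory` is valid for reads of `memory.len()` many bytes, and nothing
    /// // else mutates it while the returned reference is live.
    /// let slice: &[MaybeUninit<u8>] = unsafe { memory.as_uninit_slice() };
    /// // `allocate` returns at least the requested amount of memory.
    /// assert!(slice.len() >= 32);
    /// # Ok::<_, std::alloc::AllocError>(())
    /// ```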
    #[inline]
    #[must_use]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_slice<'a>(self) -> &'a [MaybeUninit<T>] {
        // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
        unsafe { slice::from_raw_parts(self.cast().as_ptr(), self.len()) }
    }

    /// Returns a unique reference to a slice of possibly uninitialized values. In contrast to
    /// [`as_mut`], this does not require the value to be initialized.
    ///
    /// For the shared counterpart see [`as_uninit_slice`].
    ///
    /// [`as_mut`]: NonNull::as_mut
    /// [`as_uninit_slice`]: NonNull::as_uninit_slice
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that all of the following is true:
    ///
    /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
    ///   many bytes, and it must be properly aligned. This means in particular:
    ///
    ///     * The entire memory range of this slice must be contained within a single allocated object!
    ///       Slices can never span across multiple allocated objects.
    ///
    ///     * The pointer must be aligned even for zero-length slices. One
    ///       reason for this is that enum layout optimizations may rely on references
    ///       (including slices of any length) being aligned and non-null to distinguish
    ///       them from other data. You can obtain a pointer that is usable as `data`
    ///       for zero-length slices using [`NonNull::dangling()`].
    ///
    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get accessed (read or written) through any other pointer.
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// See also [`slice::from_raw_parts_mut`].
    ///
    /// [valid]: crate::ptr#safety
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(allocator_api, ptr_as_uninit)]
    ///
    /// use std::alloc::{Allocator, Layout, Global};
    /// use std::mem::MaybeUninit;
    /// use std::ptr::NonNull;
    ///
    /// let memory: NonNull<[u8]> = Global.allocate(Layout::new::<[u8; 32]>())?;
    /// // This is safe as `memory` is valid for reads and writes for `memory.len()` many bytes.
    /// // Note that calling `memory.as_mut()` is not allowed here as the content may be uninitialized.
    /// # #[allow(unused_variables)]
    /// let slice: &mut [MaybeUninit<u8>] = unsafe { memory.as_uninit_slice_mut() };
    /// # Ok::<_, std::alloc::AllocError>(())
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_slice_mut<'a>(self) -> &'a mut [MaybeUninit<T>] {
        // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`.
        unsafe { slice::from_raw_parts_mut(self.cast().as_ptr(), self.len()) }
    }

    /// Returns a raw pointer to an element or subslice, without doing bounds
    /// checking.
    ///
    /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
    /// is *[undefined behavior]* even if the resulting pointer is not used.
    ///
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_ptr_get)]
    /// use std::ptr::NonNull;
    ///
    /// let x = &mut [1, 2, 4];
    /// let x = NonNull::slice_from_raw_parts(NonNull::new(x.as_mut_ptr()).unwrap(), x.len());
    ///
    /// unsafe {
    ///     assert_eq!(x.get_unchecked_mut(1).as_ptr(), x.as_non_null_ptr().as_ptr().add(1));
    /// }
    /// ```
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[inline]
    pub unsafe fn get_unchecked_mut<I>(self, index: I) -> NonNull<I::Output>
    where
        I: SliceIndex<[T]>,
    {
        // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
        // As a consequence, the resulting pointer cannot be null.
        unsafe { NonNull::new_unchecked(self.as_ptr().get_unchecked_mut(index)) }
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Clone for NonNull<T> {
    #[inline(always)]
    fn clone(&self) -> Self {
        *self
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Copy for NonNull<T> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Debug for NonNull<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Pointer for NonNull<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Eq for NonNull<T> {}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialEq for NonNull<T> {
    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn eq(&self, other: &Self) -> bool {
        self.as_ptr() == other.as_ptr()
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Ord for NonNull<T> {
    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn cmp(&self, other: &Self) -> Ordering {
        self.as_ptr().cmp(&other.as_ptr())
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialOrd for NonNull<T> {
    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.as_ptr().partial_cmp(&other.as_ptr())
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> hash::Hash for NonNull<T> {
    #[inline]
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.as_ptr().hash(state)
    }
}

#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
    #[inline]
    fn from(unique: Unique<T>) -> Self {
        unique.as_non_null_ptr()
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> From<&mut T> for NonNull<T> {
    /// Converts a `&mut T` to a `NonNull<T>`.
    ///
    /// This conversion is safe and infallible since references cannot be null.
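    ///
    /// # Examples
    ///
    /// A small sketch of round-tripping a mutable reference through `NonNull`:
    ///
    /// ```
    /// use std::ptr::NonNull;
    ///
    /// let mut x = 0u32;
    /// let mut ptr = NonNull::from(&mut x);
    ///
    /// // SAFETY: `ptr` was just derived from a live, exclusive reference.
    /// unsafe { *ptr.as_mut() += 2 };
    /// assert_eq!(x, 2);
    /// ```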
    #[inline]
    fn from(reference: &mut T) -> Self {
        // SAFETY: A mutable reference cannot be null.
        unsafe { NonNull { pointer: reference as *mut T } }
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> From<&T> for NonNull<T> {
    /// Converts a `&T` to a `NonNull<T>`.
    ///
    /// This conversion is safe and infallible since references cannot be null.
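    ///
    /// # Examples
    ///
    /// A small sketch of a read-only `NonNull` created from a shared reference:
    ///
    /// ```
    /// use std::ptr::NonNull;
    ///
    /// let x = 5u32;
    /// let ptr = NonNull::from(&x);
    ///
    /// // SAFETY: `ptr` points to a live `u32` and is only used for reading.
    /// assert_eq!(unsafe { *ptr.as_ref() }, 5);
    /// ```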
    #[inline]
    fn from(reference: &T) -> Self {
        // SAFETY: A reference cannot be null.
        unsafe { NonNull { pointer: reference as *const T } }
    }
}
