use super::*;
use crate::cmp::Ordering::{Equal, Greater, Less};
use crate::intrinsics::const_eval_select;
use crate::mem::SizedTypeProperties;
use crate::slice::{self, SliceIndex};

impl<T: ?Sized> *const T {
    /// Returns `true` if the pointer is null.
    ///
    /// Note that unsized types have many possible null pointers, as only the
    /// raw data pointer is considered, not their length, vtable, etc.
    /// Therefore, two pointers that are null may still not compare equal to
    /// each other.
    ///
    /// ## Behavior during const evaluation
    ///
    /// When this function is used during const evaluation, it may return `false` for pointers
    /// that turn out to be null at runtime. Specifically, when a pointer to some memory
    /// is offset beyond its bounds in such a way that the resulting pointer is null,
    /// the function will still return `false`. There is no way for CTFE to know
    /// the absolute position of that memory, so we cannot tell if the pointer is
    /// null or not.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "Follow the rabbit";
    /// let ptr: *const u8 = s.as_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
    #[rustc_diagnostic_item = "ptr_const_is_null"]
    #[inline]
    pub const fn is_null(self) -> bool {
        #[inline]
        fn runtime_impl(ptr: *const u8) -> bool {
            ptr.addr() == 0
        }

        #[inline]
        const fn const_impl(ptr: *const u8) -> bool {
            // Compare via a cast to a thin pointer, so that fat pointers only
            // consider their "data" part for null-ness.
            match ptr.guaranteed_eq(null_mut()) {
                None => false,
                Some(res) => res,
            }
        }

        #[allow(unused_unsafe)]
        const_eval_select((self as *const u8,), const_impl, runtime_impl)
    }

    /// Casts to a pointer of another type.
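    ///
    /// # Examples
    ///
    /// A minimal sketch of usage (the variable names are illustrative, not part of the API):
    ///
    /// ```
    /// let x = 1u32;
    /// let p: *const u32 = &x;
    /// let q: *const u8 = p.cast();
    /// // Only the pointee type changes; the address is unchanged.
    /// assert_eq!(q as usize, p as usize);
    /// ```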
    #[stable(feature = "ptr_cast", since = "1.38.0")]
    #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
    #[rustc_diagnostic_item = "const_ptr_cast"]
    #[inline(always)]
    pub const fn cast<U>(self) -> *const U {
        self as _
    }

    /// Use the pointer value in a new pointer of another type.
    ///
    /// In case `meta` is a (fat) pointer to an unsized type, this operation
    /// will ignore the pointer part, whereas for (thin) pointers to sized
    /// types, this has the same effect as a simple cast.
    ///
    /// The resulting pointer will have provenance of `self`, i.e., for a fat
    /// pointer, this operation is semantically the same as creating a new
    /// fat pointer with the data pointer value of `self` but the metadata of
    /// `meta`.
    ///
    /// # Examples
    ///
    /// This function is primarily useful for allowing byte-wise pointer
    /// arithmetic on potentially fat pointers:
    ///
    /// ```
    /// #![feature(set_ptr_value)]
    /// # use core::fmt::Debug;
    /// let arr: [i32; 3] = [1, 2, 3];
    /// let mut ptr = arr.as_ptr() as *const dyn Debug;
    /// let thin = ptr as *const u8;
    /// unsafe {
    ///     ptr = thin.add(8).with_metadata_of(ptr);
    ///     # assert_eq!(*(ptr as *const i32), 3);
    ///     println!("{:?}", &*ptr); // will print "3"
    /// }
    /// ```
    #[unstable(feature = "set_ptr_value", issue = "75091")]
    #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[inline]
    pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
    where
        U: ?Sized,
    {
        from_raw_parts::<U>(self as *const (), metadata(meta))
    }

    /// Changes constness without changing the type.
    ///
    /// This is a bit safer than `as` because it wouldn't silently change the type if the code is
    /// refactored.
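    ///
    /// # Examples
    ///
    /// A small sketch; the resulting `*mut T` may only be written through if the
    /// underlying memory is actually mutable:
    ///
    /// ```
    /// let x = 0u32;
    /// let p: *const u32 = &x;
    /// let q: *mut u32 = p.cast_mut();
    /// assert_eq!(q as *const u32, p);
    /// ```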
    #[stable(feature = "ptr_const_cast", since = "1.65.0")]
    #[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
    #[rustc_diagnostic_item = "ptr_cast_mut"]
    #[inline(always)]
    pub const fn cast_mut(self) -> *mut T {
        self as _
    }

    /// Casts a pointer to its raw bits.
    ///
    /// This is equivalent to `as usize`, but is more specific to enhance readability.
    /// The inverse method is [`from_bits`](#method.from_bits).
    ///
    /// In particular, `*p as usize` and `p as usize` will both compile for
    /// pointers to numeric types but do very different things, so using this
    /// helps emphasize that reading the bits was intentional.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_to_from_bits)]
    /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
    /// let array = [13, 42];
    /// let p0: *const i32 = &array[0];
    /// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
    /// let p1: *const i32 = &array[1];
    /// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
    /// # }
    /// ```
    #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
    #[deprecated(
        since = "1.67.0",
        note = "replaced by the `expose_provenance` method, or update your code \
            to follow the strict provenance rules using its APIs"
    )]
    #[inline(always)]
    pub fn to_bits(self) -> usize
    where
        T: Sized,
    {
        self as usize
    }

    /// Creates a pointer from its raw bits.
    ///
    /// This is equivalent to `as *const T`, but is more specific to enhance readability.
    /// The inverse method is [`to_bits`](#method.to_bits).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_to_from_bits)]
    /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
    /// use std::ptr::NonNull;
    /// let dangling: *const u8 = NonNull::dangling().as_ptr();
    /// assert_eq!(<*const u8>::from_bits(1), dangling);
    /// # }
    /// ```
    #[unstable(feature = "ptr_to_from_bits", issue = "91126")]
    #[deprecated(
        since = "1.67.0",
        note = "replaced by the `ptr::with_exposed_provenance` function, or update \
            your code to follow the strict provenance rules using its APIs"
    )]
    #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
    #[inline(always)]
    pub fn from_bits(bits: usize) -> Self
    where
        T: Sized,
    {
        bits as Self
    }

    /// Gets the "address" portion of the pointer.
    ///
    /// This is similar to `self as usize`, which semantically discards *provenance* and
    /// *address-space* information. However, unlike `self as usize`, casting the returned address
    /// back to a pointer yields a [pointer without provenance][without_provenance], which is
    /// undefined behavior to dereference. To properly restore the lost information and obtain a
    /// dereferenceable pointer, use [`with_addr`][pointer::with_addr] or
    /// [`map_addr`][pointer::map_addr].
    ///
    /// If using those APIs is not possible because there is no way to preserve a pointer with the
    /// required provenance, then Strict Provenance might not be for you. Use pointer-integer casts
    /// or [`expose_provenance`][pointer::expose_provenance] and [`with_exposed_provenance`][with_exposed_provenance]
    /// instead. However, note that this makes your code less portable and less amenable to tools
    /// that check for compliance with the Rust memory model.
    ///
    /// On most platforms this will produce a value with the same bytes as the original
    /// pointer, because all the bytes are dedicated to describing the address.
    /// Platforms which need to store additional information in the pointer may
    /// perform a change of representation to produce a value containing only the address
    /// portion of the pointer. What that means is up to the platform to define.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment, and as such
    /// might change in the future (including possibly weakening this so it becomes wholly
    /// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
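    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `strict_provenance` feature is enabled:
    ///
    /// ```
    /// #![feature(strict_provenance)]
    /// let x = 42u8;
    /// let ptr: *const u8 = &x;
    /// // The raw address can be carried around separately and later recombined
    /// // with the original pointer's provenance via `with_addr`.
    /// assert_eq!(ptr.with_addr(ptr.addr()), ptr);
    /// ```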
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn addr(self) -> usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
        // provenance).
        unsafe { mem::transmute(self.cast::<()>()) }
    }

    /// Exposes the "provenance" part of the pointer for future use in
    /// [`with_exposed_provenance`][] and returns the "address" portion.
    ///
    /// This is equivalent to `self as usize`, which semantically discards *provenance* and
    /// *address-space* information. Furthermore, this (like the `as` cast) has the implicit
    /// side-effect of marking the provenance as 'exposed', so on platforms that support it you can
    /// later call [`with_exposed_provenance`][] to reconstitute the original pointer including its
    /// provenance. (Reconstructing address space information, if required, is your responsibility.)
    ///
    /// Using this method means that code is *not* following [Strict
    /// Provenance][super#strict-provenance] rules. Supporting
    /// [`with_exposed_provenance`][] complicates specification and reasoning and may not be supported by
    /// tools that help you to stay conformant with the Rust memory model, so it is recommended to
    /// use [`addr`][pointer::addr] wherever possible.
    ///
    /// On most platforms this will produce a value with the same bytes as the original pointer,
    /// because all the bytes are dedicated to describing the address. Platforms which need to store
    /// additional information in the pointer may not support this operation, since the 'expose'
    /// side-effect which is required for [`with_exposed_provenance`][] to work is typically not
    /// available.
    ///
    /// It is unclear whether this method can be given a satisfying unambiguous specification. This
    /// API and its claimed semantics are part of [Exposed Provenance][super#exposed-provenance].
    ///
    /// [`with_exposed_provenance`]: with_exposed_provenance
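    ///
    /// # Examples
    ///
    /// A sketch of the expose/reconstitute round-trip, assuming the `exposed_provenance`
    /// feature is enabled:
    ///
    /// ```
    /// #![feature(exposed_provenance)]
    /// use std::ptr;
    ///
    /// let x = 3u8;
    /// let p: *const u8 = &x;
    /// let addr = p.expose_provenance();
    /// // Later, an integer-to-pointer conversion can pick up the exposed provenance.
    /// let q: *const u8 = ptr::with_exposed_provenance(addr);
    /// assert_eq!(unsafe { *q }, 3);
    /// ```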
    #[must_use]
    #[inline(always)]
    #[unstable(feature = "exposed_provenance", issue = "95228")]
    pub fn expose_provenance(self) -> usize {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        self.cast::<()>() as usize
    }

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as an `addr as ptr` cast, but copies
    /// the *address-space* and *provenance* of `self` to the new pointer.
    /// This allows us to dynamically preserve and propagate this important
    /// information in a way that is otherwise impossible with a unary cast.
    ///
    /// This is equivalent to using [`wrapping_offset`][pointer::wrapping_offset] to offset
    /// `self` to the given address, and therefore has all the same capabilities and restrictions.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment,
    /// see the [module documentation][crate::ptr] for details.
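    ///
    /// # Examples
    ///
    /// A minimal sketch (equivalent in effect to `wrapping_add(1)` here):
    ///
    /// ```
    /// #![feature(strict_provenance)]
    /// let a = [1u8, 2, 3];
    /// let p: *const u8 = a.as_ptr();
    /// // The new pointer keeps the provenance of `p`, so it may still be dereferenced.
    /// let q = p.with_addr(p.addr() + 1);
    /// assert_eq!(unsafe { *q }, 2);
    /// ```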
    #[must_use]
    #[inline]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn with_addr(self, addr: usize) -> Self {
        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
        //
        // In the mean-time, this operation is defined to be "as if" it was
        // a wrapping_offset, so we can emulate it as such. This should properly
        // restore pointer provenance even under today's compiler.
        let self_addr = self.addr() as isize;
        let dest_addr = addr as isize;
        let offset = dest_addr.wrapping_sub(self_addr);

        // This is the canonical desugaring of this operation
        self.wrapping_byte_offset(offset)
    }

    /// Creates a new pointer by mapping `self`'s address to a new one.
    ///
    /// This is a convenience for [`with_addr`][pointer::with_addr], see that method for details.
    ///
    /// This API and its claimed semantics are part of the Strict Provenance experiment,
    /// see the [module documentation][crate::ptr] for details.
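    ///
    /// # Examples
    ///
    /// For instance, rounding an address down to some alignment (the mask here is illustrative):
    ///
    /// ```
    /// #![feature(strict_provenance)]
    /// let v = 0u64;
    /// let p: *const u64 = &v;
    /// // Clear the low three address bits to round down to an 8-byte boundary.
    /// let aligned = p.map_addr(|a| a & !0b111);
    /// assert_eq!(aligned.addr() % 8, 0);
    /// ```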
    #[must_use]
    #[inline]
    #[unstable(feature = "strict_provenance", issue = "95228")]
    pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
        self.with_addr(f(self.addr()))
    }

    /// Decomposes a (possibly wide) pointer into its data pointer and metadata components.
    ///
    /// The pointer can later be reconstructed with [`from_raw_parts`].
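    ///
    /// # Examples
    ///
    /// A sketch using a slice pointer, whose metadata is its length:
    ///
    /// ```
    /// #![feature(ptr_metadata)]
    /// let a = [1u8, 2, 3];
    /// let p: *const [u8] = &a;
    /// let (data, len) = p.to_raw_parts();
    /// assert_eq!(len, 3);
    /// // Round-trip through `from_raw_parts`.
    /// let q: *const [u8] = std::ptr::from_raw_parts(data, len);
    /// assert_eq!(unsafe { &*q }, &a[..]);
    /// ```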
    #[unstable(feature = "ptr_metadata", issue = "81513")]
    #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")]
    #[inline]
    pub const fn to_raw_parts(self) -> (*const (), <T as super::Pointee>::Metadata) {
        (self.cast(), metadata(self))
    }

    /// Returns `None` if the pointer is null, or else returns a shared reference to
    /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
    /// must be used instead.
    ///
    /// [`as_uninit_ref`]: #method.as_uninit_ref
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be properly aligned.
    ///
    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
    ///
    /// * The pointer must point to an initialized instance of `T`.
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    /// (The part about being initialized is not yet fully decided, but until
    /// it is, the only safe approach is to ensure that they are indeed initialized.)
    ///
    /// [the module documentation]: crate::ptr#safety
    ///
    /// # Examples
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {val_back}!");
    ///     }
    /// }
    /// ```
    ///
    /// # Null-unchecked version
    ///
    /// If you are sure the pointer can never be null and are looking for some kind of
    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
    /// dereference the pointer directly.
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     let val_back = &*ptr;
    ///     println!("We got back the value: {val_back}!");
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    #[inline]
    pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        // SAFETY: the caller must guarantee that `self` is valid
        // for a reference if it isn't null.
        if self.is_null() { None } else { unsafe { Some(&*self) } }
    }

    /// Returns `None` if the pointer is null, or else returns a shared reference to
    /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
    /// that the value has to be initialized.
    ///
    /// [`as_ref`]: #method.as_ref
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be properly aligned.
    ///
    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// [the module documentation]: crate::ptr#safety
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_as_uninit)]
    ///
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_uninit_ref() {
    ///         println!("We got back the value: {}!", val_back.assume_init());
    ///     }
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
    where
        T: Sized,
    {
        // SAFETY: the caller must guarantee that `self` meets all the
        // requirements for a reference.
        if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_offset`]: #method.wrapping_offset
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn offset(self, count: isize) -> *const T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { intrinsics::offset(self, count) }
    }

    /// Calculates the offset from a pointer in bytes.
    ///
    /// `count` is in units of **bytes**.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [offset][pointer::offset] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
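    ///
    /// # Examples
    ///
    /// A small sketch; stepping 4 bytes here equals one `u32` element:
    ///
    /// ```
    /// let a = [1u32, 2];
    /// let p: *const u32 = a.as_ptr();
    /// unsafe {
    ///     assert_eq!(*p.byte_offset(4), 2);
    /// }
    /// ```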
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_offset(self, count: isize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`offset`], this method basically delays the requirement of staying within the
    /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other
    /// words, leaving the allocated object and then re-entering it later is permitted.
    ///
    /// [`offset`]: #method.offset
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_offset(self, count: isize) -> *const T
    where
        T: Sized,
    {
        // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
        unsafe { intrinsics::arith_offset(self, count) }
    }

    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    ///
    /// `count` is in units of **bytes**.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_offset][pointer::wrapping_offset] on it. See that method
    /// for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    pub const fn wrapping_byte_offset(self, count: isize) -> Self {
        self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
    }

    /// Masks out bits of the pointer according to a mask.
    ///
    /// This is a convenience for `ptr.map_addr(|a| a & mask)`.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    ///
    /// ## Examples
    ///
    /// ```
    /// #![feature(ptr_mask, strict_provenance)]
    /// let v = 17_u32;
    /// let ptr: *const u32 = &v;
    ///
    /// // `u32` is 4-byte aligned,
    /// // which means that the lower 2 bits are always 0.
    /// let tag_mask = 0b11;
    /// let ptr_mask = !tag_mask;
    ///
    /// // We can store something in these lower bits
    /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
    ///
    /// // Get the "tag" back
    /// let tag = tagged_ptr.addr() & tag_mask;
    /// assert_eq!(tag, 0b10);
    ///
    /// // Note that `tagged_ptr` is unaligned, so it's UB to read from it.
    /// // To get the original pointer back, `mask` can be used:
    /// let masked_ptr = tagged_ptr.mask(ptr_mask);
    /// assert_eq!(unsafe { *masked_ptr }, 17);
    /// ```
    #[unstable(feature = "ptr_mask", issue = "98290")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[inline(always)]
    pub fn mask(self, mask: usize) -> *const T {
        intrinsics::ptr_mask(self.cast::<()>(), mask).with_metadata_of(self)
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes divided by `mem::size_of::<T>()`.
    ///
    /// This is equivalent to `(self as isize - origin as isize) / (mem::size_of::<T>() as isize)`,
    /// except that it has a lot more opportunities for UB, in exchange for the compiler
    /// better understanding what you are doing.
    ///
    /// The primary motivation of this method is for computing the `len` of an array/slice
    /// of `T` that you are currently representing as a "start" and "end" pointer
    /// (and "end" is "one past the end" of the array).
    /// In that case, `end.offset_from(start)` gets you the length of the array.
    ///
    /// All of the following safety requirements are trivially satisfied for this use case.
    ///
    /// [`offset`]: #method.offset
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both `self` and `origin` must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * Both pointers must be *derived from* a pointer to the same object.
    ///   (See below for an example.)
    ///
    /// * The distance between the pointers, in bytes, must be an exact multiple
    ///   of the size of `T`.
    ///
    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
    ///
    /// Rust types are never larger than `isize::MAX` and Rust allocations never wrap around the
    /// address space, so two pointers within some value of any Rust type `T` will always satisfy
    /// the last two conditions. The standard library also generally ensures that allocations
    /// never reach a size where an offset is a concern. For instance, `Vec` and `Box` ensure they
    /// never allocate more than `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
    /// always satisfies the last two conditions.
    ///
    /// Most platforms fundamentally can't even construct such a large allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    /// (Note that [`offset`] and [`add`] also have a similar limitation and hence cannot be used on
    /// such large allocations either.)
    ///
    /// The requirement for pointers to be derived from the same allocated object is primarily
    /// needed for `const`-compatibility: the distance between pointers into *different* allocated
    /// objects is not known at compile-time. However, the requirement also exists at
    /// runtime and may be exploited by optimizations. If you wish to compute the difference between
    /// pointers that are not guaranteed to be from the same allocation, use `(self as isize -
    /// origin as isize) / mem::size_of::<T>()`.
    // FIXME: recommend `addr()` instead of `as usize` once that is stable.
    ///
    /// [`add`]: #method.add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    ///
    /// *Incorrect* usage:
    ///
    /// ```rust,no_run
    /// let ptr1 = Box::into_raw(Box::new(0u8)) as *const u8;
    /// let ptr2 = Box::into_raw(Box::new(1u8)) as *const u8;
    /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
    /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
    /// let ptr2_other = (ptr1 as *const u8).wrapping_offset(diff);
    /// assert_eq!(ptr2 as usize, ptr2_other as usize);
    /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
    /// // computing their offset is undefined behavior, even though
    /// // they point to the same address!
    /// unsafe {
    ///     let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
    /// }
    /// ```
    #[stable(feature = "ptr_offset_from", since = "1.47.0")]
    #[rustc_const_stable(feature = "const_ptr_offset_from", since = "1.65.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn offset_from(self, origin: *const T) -> isize
    where
        T: Sized,
    {
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
        // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
        unsafe { intrinsics::ptr_offset_from(self, origin) }
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of **bytes**.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [`offset_from`][pointer::offset_from] on it. See that method for
    /// documentation and safety requirements.
    ///
    /// For non-`Sized` pointees this operation considers only the data pointers,
    /// ignoring the metadata.
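    ///
    /// # Examples
    ///
    /// A small sketch with element pointers into the same array:
    ///
    /// ```
    /// let a = [0u16; 4];
    /// let p: *const u16 = &a[0];
    /// let q: *const u16 = &a[2];
    /// unsafe {
    ///     // 2 elements of 2 bytes each.
    ///     assert_eq!(q.byte_offset_from(p), 4);
    /// }
    /// ```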
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
        // SAFETY: the caller must uphold the safety contract for `offset_from`.
        unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
    }

    /// Calculates the distance between two pointers, *where it's known that
    /// `self` is equal to or greater than `origin`*. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// This computes the same value that [`offset_from`](#method.offset_from)
    /// would compute, but with the added precondition that the offset is
    /// guaranteed to be non-negative. This method is equivalent to
    /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`,
    /// but it provides slightly more information to the optimizer, which can
    /// sometimes allow it to optimize slightly better with some backends.
    ///
    /// This method can be thought of as recovering the `count` that was passed
    /// to [`add`](#method.add) (or, with the parameters in the other order,
    /// to [`sub`](#method.sub)). The following are all equivalent, assuming
    /// that their safety preconditions are met:
    /// ```rust
    /// # #![feature(ptr_sub_ptr)]
    /// # unsafe fn blah(ptr: *const i32, origin: *const i32, count: usize) -> bool {
    /// ptr.sub_ptr(origin) == count
    /// # &&
    /// origin.add(count) == ptr
    /// # &&
    /// ptr.sub(count) == origin
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// - The distance between the pointers must be non-negative (`self >= origin`)
    ///
    /// - *All* the safety conditions of [`offset_from`](#method.offset_from)
    ///   apply to this method as well; see it for the full details.
    ///
    /// Importantly, despite the return type of this method being able to represent
    /// a larger offset, it's still *not permitted* to pass pointers which differ
    /// by more than `isize::MAX` *bytes*. As such, the result of this method will
    /// always be less than or equal to `isize::MAX as usize`.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ptr_sub_ptr)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.sub_ptr(ptr1), 2);
    ///     assert_eq!(ptr1.add(2), ptr2);
    ///     assert_eq!(ptr2.sub(2), ptr1);
    ///     assert_eq!(ptr2.sub_ptr(ptr2), 0);
    /// }
    ///
    /// // This would be incorrect, as the pointers are not correctly ordered:
    /// // ptr1.sub_ptr(ptr2)
    /// ```
    #[unstable(feature = "ptr_sub_ptr", issue = "95892")]
    #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn sub_ptr(self, origin: *const T) -> usize
    where
        T: Sized,
    {
        const fn runtime_ptr_ge(this: *const (), origin: *const ()) -> bool {
            fn runtime(this: *const (), origin: *const ()) -> bool {
                this >= origin
            }
            const fn comptime(_: *const (), _: *const ()) -> bool {
                true
            }

            #[allow(unused_unsafe)]
            intrinsics::const_eval_select((this, origin), comptime, runtime)
        }

        ub_checks::assert_unsafe_precondition!(
            check_language_ub,
            "ptr::sub_ptr requires `self >= origin`",
            (
                this: *const () = self as *const (),
                origin: *const () = origin as *const (),
            ) => runtime_ptr_ge(this, origin)
        );

        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
        // SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
        unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
    }
    /// Returns whether two pointers are guaranteed to be equal.
    ///
    /// At runtime this function behaves like `Some(self == other)`.
    /// However, in some contexts (e.g., compile-time evaluation),
    /// it is not always possible to determine equality of two pointers, so this function may
    /// spuriously return `None` for pointers that later actually turn out to have their equality known.
    /// But when it returns `Some`, the pointers' equality is guaranteed to be known.
    ///
    /// The return value may change from `Some` to `None` and vice versa depending on the compiler
    /// version, and unsafe code must not rely on the result of this function for soundness. It is
    /// suggested to only use this function for performance optimizations where spurious `None`
    /// return values by this function do not affect the outcome, but just the performance.
    /// The consequences of using this method to make runtime and compile-time code behave
    /// differently have not been explored. This method should not be used to introduce such
    /// differences, and it should also not be stabilized before we have a better understanding
    /// of this issue.
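    ///
    /// # Examples
    ///
    /// An illustrative use; at runtime the result is always `Some`:
    ///
    /// ```
    /// #![feature(const_raw_ptr_comparison)]
    /// let a = [1i32, 2];
    /// let x: *const i32 = &a[0];
    /// let y: *const i32 = &a[1];
    /// assert_eq!(x.guaranteed_eq(x), Some(true));
    /// assert_eq!(x.guaranteed_eq(y), Some(false));
    /// ```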
    #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[inline]
    pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
    where
        T: Sized,
    {
        match intrinsics::ptr_guaranteed_cmp(self, other) {
            2 => None,
            other => Some(other == 1),
        }
    }

    /// Returns whether two pointers are guaranteed to be unequal.
    ///
    /// At runtime this function behaves like `Some(self != other)`.
    /// However, in some contexts (e.g., compile-time evaluation),
    /// it is not always possible to determine inequality of two pointers, so this function may
    /// spuriously return `None` for pointers that later actually turn out to have their inequality known.
    /// But when it returns `Some`, the pointers' inequality is guaranteed to be known.
    ///
    /// The return value may change from `Some` to `None` and vice versa depending on the compiler
    /// version, and unsafe code must not rely on the result of this function for soundness. It is
    /// suggested to only use this function for performance optimizations where spurious `None`
    /// return values by this function do not affect the outcome, but just the performance.
    /// The consequences of using this method to make runtime and compile-time code behave
    /// differently have not been explored. This method should not be used to introduce such
    /// differences, and it should also not be stabilized before we have a better understanding
    /// of this issue.
    #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
    #[inline]
    pub const fn guaranteed_ne(self, other: *const T) -> Option<bool>
    where
        T: Sized,
    {
        match self.guaranteed_eq(other) {
            None => None,
            Some(eq) => Some(!eq),
        }
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_add`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_add`]: #method.wrapping_add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn add(self, count: usize) -> Self
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `offset`.
        unsafe { intrinsics::offset(self, count) }
    }

    /// Calculates the offset from a pointer in bytes (convenience for `.byte_offset(count as isize)`).
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [add][pointer::add] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
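    ///
    /// # Examples
    ///
    /// A sketch reading the second byte of a `u16`, whatever the endianness:
    ///
    /// ```
    /// let v = u16::from_ne_bytes([0xAA, 0xBB]);
    /// let p: *const u16 = &v;
    /// unsafe {
    ///     assert_eq!(*p.byte_add(1).cast::<u8>(), 0xBB);
    /// }
    /// ```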
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_add(self, count: usize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `add`.
        unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
    }

    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same [allocated object].
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_sub`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// [`wrapping_sub`]: #method.wrapping_sub
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// let s: &str = "123";
    ///
    /// unsafe {
    ///     let end: *const u8 = s.as_ptr().add(3);
    ///     println!("{}", *end.sub(1) as char);
    ///     println!("{}", *end.sub(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn sub(self, count: usize) -> Self
    where
        T: Sized,
    {
        if T::IS_ZST {
            // Pointer arithmetic does nothing when the pointee is a ZST.
            self
        } else {
            // SAFETY: the caller must uphold the safety contract for `offset`.
            // Because the pointee is *not* a ZST, that means that `count` is
            // at most `isize::MAX`, and thus the negation cannot overflow.
            unsafe { self.offset(intrinsics::unchecked_sub(0, count as isize)) }
        }
    }

    /// Calculates the offset from a pointer in bytes (convenience for
    /// `.byte_offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [sub][pointer::sub] on it. See that method for documentation
    /// and safety requirements.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn byte_sub(self, count: usize) -> Self {
        // SAFETY: the caller must uphold the safety contract for `sub`.
        unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset(count as isize)`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`add`], this method basically delays the requirement of staying within the
    /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
    /// allocated object and then re-entering it later is permitted.
    ///
    /// [`add`]: #method.add
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_add(self, count: usize) -> Self
    where
        T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    /// (convenience for `.wrapping_byte_offset(count as isize)`)
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_add][pointer::wrapping_add] on it. See that method for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    pub const fn wrapping_byte_add(self, count: usize) -> Self {
        self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// This operation itself is always safe, but using the resulting pointer is not.
    ///
    /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not
    /// be used to read or write other allocated objects.
    ///
    /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z`
    /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still
    /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless
    /// `x` and `y` point into the same allocated object.
    ///
    /// Compared to [`sub`], this method basically delays the requirement of staying within the
    /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object
    /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a
    /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`]
    /// can be optimized better and is thus preferable in performance-sensitive code.
    ///
    /// The delayed check only considers the value of the pointer that was dereferenced, not the
    /// intermediate values used during the computation of the final result. For example,
    /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the
    /// allocated object and then re-entering it later is permitted.
    ///
    /// [`sub`]: #method.sub
    /// [allocated object]: crate::ptr#allocated-object
    ///
    /// # Examples
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[must_use = "returns a new pointer rather than modifying its argument"]
    #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
    #[inline(always)]
    pub const fn wrapping_sub(self, count: usize) -> Self
    where
        T: Sized,
    {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Calculates the offset from a pointer in bytes using wrapping arithmetic.
    /// (convenience for `.wrapping_byte_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of bytes.
    ///
    /// This is purely a convenience for casting to a `u8` pointer and
    /// using [wrapping_sub][pointer::wrapping_sub] on it. See that method for documentation.
    ///
    /// For non-`Sized` pointees this operation changes only the data pointer,
    /// leaving the metadata untouched.
    #[must_use]
    #[inline(always)]
    #[stable(feature = "pointer_byte_offsets", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_pointer_byte_offsets", since = "1.75.0")]
    #[rustc_allow_const_fn_unstable(set_ptr_value)]
    pub const fn wrapping_byte_sub(self, count: usize) -> Self {
        self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// See [`ptr::read`] for safety concerns and examples.
    ///
    /// [`ptr::read`]: crate::ptr::read()
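    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// let x = 12u32;
    /// let p: *const u32 = &x;
    /// unsafe {
    ///     assert_eq!(p.read(), 12);
    /// }
    /// ```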
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn read(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read`.
        unsafe { read(self) }
    }

    /// Performs a volatile read of the value from `self` without moving it. This
    /// leaves the memory in `self` unchanged.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// See [`ptr::read_volatile`] for safety concerns and examples.
    ///
    /// [`ptr::read_volatile`]: crate::ptr::read_volatile()
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn read_volatile(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
        unsafe { read_volatile(self) }
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// Unlike `read`, the pointer may be unaligned.
    ///
    /// See [`ptr::read_unaligned`] for safety concerns and examples.
    ///
    /// [`ptr::read_unaligned`]: crate::ptr::read_unaligned()
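    ///
    /// # Examples
    ///
    /// A sketch reading from a deliberately misaligned location:
    ///
    /// ```
    /// let data = [0u8, 1, 2, 3, 4];
    /// unsafe {
    ///     // `add(1)` is in general not suitably aligned for `u32`.
    ///     let p = data.as_ptr().add(1).cast::<u32>();
    ///     assert_eq!(p.read_unaligned(), u32::from_ne_bytes([1, 2, 3, 4]));
    /// }
    /// ```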
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[rustc_const_stable(feature = "const_ptr_read", since = "1.71.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn read_unaligned(self) -> T
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
        unsafe { read_unaligned(self) }
    }

    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *same* argument order as [`ptr::copy`].
    ///
    /// See [`ptr::copy`] for safety concerns and examples.
    ///
    /// [`ptr::copy`]: crate::ptr::copy()
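    ///
    /// # Examples
    ///
    /// A small sketch copying between two non-overlapping locals:
    ///
    /// ```
    /// let src = [1u8, 2, 3];
    /// let mut dst = [0u8; 3];
    /// unsafe {
    ///     src.as_ptr().copy_to(dst.as_mut_ptr(), 3);
    /// }
    /// assert_eq!(dst, [1, 2, 3]);
    /// ```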
    #[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn copy_to(self, dest: *mut T, count: usize)
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `copy`.
        unsafe { copy(self, dest, count) }
    }

    /// Copies `count * size_of::<T>()` bytes from `self` to `dest`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
    ///
    /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
    ///
    /// [`ptr::copy_nonoverlapping`]: crate::ptr::copy_nonoverlapping()
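    ///
    /// # Examples
    ///
    /// A small sketch copying between two distinct (and therefore
    /// non-overlapping) buffers:
    ///
    /// ```
    /// let src = [1u8, 2, 3, 4];
    /// let mut dst = [0u8; 4];
    /// // SAFETY: `src` and `dst` are separate allocations and cannot overlap.
    /// unsafe { src.as_ptr().copy_to_nonoverlapping(dst.as_mut_ptr(), 4) };
    /// assert_eq!(src, dst);
    /// ```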
    #[rustc_const_unstable(feature = "const_intrinsic_copy", issue = "80697")]
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub const unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
    where
        T: Sized,
    {
        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
        unsafe { copy_nonoverlapping(self, dest, count) }
    }

    /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
    /// `align`.
    ///
    /// If it is not possible to align the pointer, the implementation returns
    /// `usize::MAX`.
    ///
    /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
    /// used with the `wrapping_add` method.
    ///
    /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
    /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
    /// the returned offset is correct in all terms other than alignment.
    ///
    /// When this is called during compile-time evaluation (which is unstable), the implementation
    /// may return `usize::MAX` in cases where that can never happen at runtime. This is because the
    /// actual alignment of pointers is not known yet during compile-time, so an offset with
    /// guaranteed alignment can sometimes not be computed. For example, a buffer declared as `[u8;
    /// N]` might be allocated at an odd or an even address, but at compile-time this is not yet
    /// known, so the execution has to be correct for either choice. It is therefore impossible to
    /// find an offset that is guaranteed to be 2-aligned. (This behavior is subject to change, as usual
    /// for unstable APIs.)
    ///
    /// # Panics
    ///
    /// The function panics if `align` is not a power-of-two.
    ///
    /// # Examples
    ///
    /// Accessing adjacent `u8` as `u16`
    ///
    /// ```
    /// use std::mem::align_of;
    ///
    /// # unsafe {
    /// let x = [5_u8, 6, 7, 8, 9];
    /// let ptr = x.as_ptr();
    /// let offset = ptr.align_offset(align_of::<u16>());
    ///
    /// if offset < x.len() - 1 {
    ///     let u16_ptr = ptr.add(offset).cast::<u16>();
    ///     assert!(*u16_ptr == u16::from_ne_bytes([5, 6]) || *u16_ptr == u16::from_ne_bytes([6, 7]));
    /// } else {
    ///     // while the pointer can be aligned via `offset`, it would point
    ///     // outside the allocation
    /// }
    /// # }
    /// ```
    #[must_use]
    #[inline]
    #[stable(feature = "align_offset", since = "1.36.0")]
    #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
    pub const fn align_offset(self, align: usize) -> usize
    where
        T: Sized,
    {
        if !align.is_power_of_two() {
            panic!("align_offset: align is not a power-of-two");
        }

        // SAFETY: `align` has been checked to be a power of 2 above
        let ret = unsafe { align_offset(self, align) };

        // Inform Miri that we want to consider the resulting pointer to be suitably aligned.
        #[cfg(miri)]
        if ret != usize::MAX {
            intrinsics::miri_promise_symbolic_alignment(self.wrapping_add(ret).cast(), align);
        }

        ret
    }

    /// Returns whether the pointer is properly aligned for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// let data = AlignedI32(42);
    /// let ptr = &data as *const AlignedI32;
    ///
    /// assert!(ptr.is_aligned());
    /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
    /// ```
    ///
    /// # At compiletime
    /// **Note: Alignment at compiletime is experimental and subject to change. See the
    /// [tracking issue] for details.**
    ///
    /// At compiletime, the compiler may not know where a value will end up in memory.
    /// Calling this function on a pointer created from a reference at compiletime will only
    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
    /// is never aligned if cast to a type with a stricter alignment than the reference's
    /// underlying allocation.
    ///
    /// ```
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// const _: () = {
    ///     let data = AlignedI32(42);
    ///     let ptr = &data as *const AlignedI32;
    ///     assert!(ptr.is_aligned());
    ///
    ///     // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
    ///     let ptr1 = ptr.cast::<AlignedI64>();
    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
    ///     assert!(!ptr1.is_aligned());
    ///     assert!(!ptr2.is_aligned());
    /// };
    /// ```
    ///
    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
    ///
    /// ```
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
    /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
    /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
    ///
    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
    /// let runtime_ptr = COMPTIME_PTR;
    /// assert_ne!(
    ///     runtime_ptr.cast::<AlignedI64>().is_aligned(),
    ///     runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
    /// );
    /// ```
    ///
    /// If a pointer is created from a fixed address, this function behaves the same during
    /// runtime and compiletime.
    ///
    /// ```
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of primitives is less than their size.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    /// #[repr(align(8))]
    /// struct AlignedI64(i64);
    ///
    /// const _: () = {
    ///     let ptr = 40 as *const AlignedI32;
    ///     assert!(ptr.is_aligned());
    ///
    ///     // For pointers with a known address, runtime and compiletime behavior are identical.
    ///     let ptr1 = ptr.cast::<AlignedI64>();
    ///     let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
    ///     assert!(ptr1.is_aligned());
    ///     assert!(!ptr2.is_aligned());
    /// };
    /// ```
    ///
    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
    #[must_use]
    #[inline]
    #[stable(feature = "pointer_is_aligned", since = "CURRENT_RUSTC_VERSION")]
    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
    pub const fn is_aligned(self) -> bool
    where
        T: Sized,
    {
        self.is_aligned_to(mem::align_of::<T>())
    }

    /// Returns whether the pointer is aligned to `align`.
    ///
    /// For non-`Sized` pointees this operation considers only the data pointer,
    /// ignoring the metadata.
    ///
    /// # Panics
    ///
    /// The function panics if `align` is not a power-of-two (this includes 0).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// let data = AlignedI32(42);
    /// let ptr = &data as *const AlignedI32;
    ///
    /// assert!(ptr.is_aligned_to(1));
    /// assert!(ptr.is_aligned_to(2));
    /// assert!(ptr.is_aligned_to(4));
    ///
    /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
    /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
    ///
    /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
    /// ```
    ///
    /// # At compiletime
    /// **Note: Alignment at compiletime is experimental and subject to change. See the
    /// [tracking issue] for details.**
    ///
    /// At compiletime, the compiler may not know where a value will end up in memory.
    /// Calling this function on a pointer created from a reference at compiletime will only
    /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
    /// cannot be aligned more strictly than the reference's underlying allocation.
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// const _: () = {
    ///     let data = AlignedI32(42);
    ///     let ptr = &data as *const AlignedI32;
    ///
    ///     assert!(ptr.is_aligned_to(1));
    ///     assert!(ptr.is_aligned_to(2));
    ///     assert!(ptr.is_aligned_to(4));
    ///
    ///     // At compiletime, we know for sure that the pointer isn't aligned to 8.
    ///     assert!(!ptr.is_aligned_to(8));
    ///     assert!(!ptr.wrapping_add(1).is_aligned_to(8));
    /// };
    /// ```
    ///
    /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
    /// pointer is aligned, even if the compiletime pointer wasn't aligned.
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// // On some platforms, the alignment of i32 is less than 4.
    /// #[repr(align(4))]
    /// struct AlignedI32(i32);
    ///
    /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
    /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
    /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
    /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
    ///
    /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
    /// let runtime_ptr = COMPTIME_PTR;
    /// assert_ne!(
    ///     runtime_ptr.is_aligned_to(8),
    ///     runtime_ptr.wrapping_add(1).is_aligned_to(8),
    /// );
    /// ```
    ///
    /// If a pointer is created from a fixed address, this function behaves the same during
    /// runtime and compiletime.
    ///
    /// ```
    /// #![feature(pointer_is_aligned_to)]
    /// #![feature(const_pointer_is_aligned)]
    ///
    /// const _: () = {
    ///     let ptr = 40 as *const u8;
    ///     assert!(ptr.is_aligned_to(1));
    ///     assert!(ptr.is_aligned_to(2));
    ///     assert!(ptr.is_aligned_to(4));
    ///     assert!(ptr.is_aligned_to(8));
    ///     assert!(!ptr.is_aligned_to(16));
    /// };
    /// ```
    ///
    /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
    #[must_use]
    #[inline]
    #[unstable(feature = "pointer_is_aligned_to", issue = "96284")]
    #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
    pub const fn is_aligned_to(self, align: usize) -> bool {
        if !align.is_power_of_two() {
            panic!("is_aligned_to: align is not a power-of-two");
        }

        #[inline]
        fn runtime_impl(ptr: *const (), align: usize) -> bool {
            ptr.addr() & (align - 1) == 0
        }

        #[inline]
        const fn const_impl(ptr: *const (), align: usize) -> bool {
            // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
            ptr.align_offset(align) == 0
        }

        // The cast to `()` is used to
        //   1. deal with fat pointers; and
        //   2. ensure that `align_offset` (in `const_impl`) doesn't actually try to compute an offset.
        const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl)
    }
}

impl<T> *const [T] {
    /// Returns the length of a raw slice.
    ///
    /// The returned value is the number of **elements**, not the number of bytes.
    ///
    /// This function is safe, even when the raw slice cannot be cast to a slice
    /// reference because the pointer is null or unaligned.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::ptr;
    ///
    /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
    /// assert_eq!(slice.len(), 3);
    /// ```
    #[inline]
    #[stable(feature = "slice_ptr_len", since = "CURRENT_RUSTC_VERSION")]
    #[rustc_const_stable(feature = "const_slice_ptr_len", since = "CURRENT_RUSTC_VERSION")]
    #[rustc_allow_const_fn_unstable(ptr_metadata)]
    pub const fn len(self) -> usize {
        metadata(self)
    }

    /// Returns `true` if the raw slice has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    ///
    /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
    /// assert!(!slice.is_empty());
    /// ```
    #[inline(always)]
    #[stable(feature = "slice_ptr_len", since = "CURRENT_RUSTC_VERSION")]
    #[rustc_const_stable(feature = "const_slice_ptr_len", since = "CURRENT_RUSTC_VERSION")]
    pub const fn is_empty(self) -> bool {
        self.len() == 0
    }

    /// Returns a raw pointer to the slice's buffer.
    ///
    /// This is equivalent to casting `self` to `*const T`, but more type-safe.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(slice_ptr_get)]
    /// use std::ptr;
    ///
    /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
    /// assert_eq!(slice.as_ptr(), ptr::null());
    /// ```
    #[inline]
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
    pub const fn as_ptr(self) -> *const T {
        self as *const T
    }

    /// Returns a raw pointer to an element or subslice, without doing bounds
    /// checking.
    ///
    /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
    /// is *[undefined behavior]* even if the resulting pointer is not used.
    ///
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_ptr_get)]
    ///
    /// let x = &[1, 2, 4] as *const [i32];
    ///
    /// unsafe {
    ///     assert_eq!(x.get_unchecked(1), x.as_ptr().add(1));
    /// }
    /// ```
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[inline]
    pub unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
    where
        I: SliceIndex<[T]>,
    {
        // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
        unsafe { index.get_unchecked(self) }
    }

    /// Returns `None` if the pointer is null, or else returns a shared slice to
    /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
    /// that the value be initialized.
    ///
    /// [`as_ref`]: #method.as_ref
    ///
    /// # Safety
    ///
    /// When calling this method, you have to ensure that *either* the pointer is null *or*
    /// all of the following is true:
    ///
    /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
    ///   and it must be properly aligned. This means in particular:
    ///
    ///     * The entire memory range of this slice must be contained within a single [allocated object]!
    ///       Slices can never span across multiple allocated objects.
    ///
    ///     * The pointer must be aligned even for zero-length slices. One
    ///       reason for this is that enum layout optimizations may rely on references
    ///       (including slices of any length) being aligned and non-null to distinguish
    ///       them from other data. You can obtain a pointer that is usable as `data`
    ///       for zero-length slices using [`NonNull::dangling()`].
    ///
    /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    ///
    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
    ///   In particular, while this reference exists, the memory the pointer points to must
    ///   not get mutated (except inside `UnsafeCell`).
    ///
    /// This applies even if the result of this method is unused!
    ///
    /// See also [`slice::from_raw_parts`][].
    ///
    /// [valid]: crate::ptr#safety
    /// [allocated object]: crate::ptr#allocated-object
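    ///
    /// # Examples
    ///
    /// A minimal sketch (the `ptr_as_uninit` feature is required for this
    /// unstable method):
    ///
    /// ```
    /// #![feature(ptr_as_uninit)]
    ///
    /// let data = [1u8, 2, 3];
    /// let ptr: *const [u8] = &data;
    /// // SAFETY: `ptr` comes from a live reference, so it is valid, aligned,
    /// // and not mutated for the duration of the returned borrow.
    /// let maybe_uninit = unsafe { ptr.as_uninit_slice() }.unwrap();
    /// assert_eq!(maybe_uninit.len(), 3);
    /// ```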
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
    pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
        if self.is_null() {
            None
        } else {
            // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
            Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
        }
    }
}

impl<T, const N: usize> *const [T; N] {
    /// Returns a raw pointer to the array's buffer.
    ///
    /// This is equivalent to casting `self` to `*const T`, but more type-safe.
    ///
    /// # Examples
    ///
    /// ```rust
    /// #![feature(array_ptr_get)]
    /// use std::ptr;
    ///
    /// let arr: *const [i8; 3] = ptr::null();
    /// assert_eq!(arr.as_ptr(), ptr::null());
    /// ```
    #[inline]
    #[unstable(feature = "array_ptr_get", issue = "119834")]
    #[rustc_const_unstable(feature = "array_ptr_get", issue = "119834")]
    pub const fn as_ptr(self) -> *const T {
        self as *const T
    }

    /// Returns a raw pointer to a slice containing the entire array.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(array_ptr_get)]
    ///
    /// let arr: *const [i32; 3] = &[1, 2, 4] as *const [i32; 3];
    /// let slice: *const [i32] = arr.as_slice();
    /// assert_eq!(slice.len(), 3);
    /// ```
    #[inline]
    #[unstable(feature = "array_ptr_get", issue = "119834")]
    #[rustc_const_unstable(feature = "array_ptr_get", issue = "119834")]
    pub const fn as_slice(self) -> *const [T] {
        self
    }
}

// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn eq(&self, other: &*const T) -> bool {
        *self == *other
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *const T {}

// Comparison for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn cmp(&self, other: &*const T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *const T {
    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
        Some(self.cmp(other))
    }

    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn lt(&self, other: &*const T) -> bool {
        *self < *other
    }

    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn le(&self, other: &*const T) -> bool {
        *self <= *other
    }

    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn gt(&self, other: &*const T) -> bool {
        *self > *other
    }

    #[inline]
    #[allow(ambiguous_wide_pointer_comparisons)]
    fn ge(&self, other: &*const T) -> bool {
        *self >= *other
    }
}