//! Types and traits associated with masking elements of vectors.
#![allow(non_camel_case_types)]

#[cfg_attr(
    not(all(target_arch = "x86_64", target_feature = "avx512f")),
    path = "masks/full_masks.rs"
)]
#[cfg_attr(
    all(target_arch = "x86_64", target_feature = "avx512f"),
    path = "masks/bitmask.rs"
)]
mod mask_impl;

use crate::simd::{LaneCount, Simd, SimdCast, SimdElement, SupportedLaneCount};
use core::cmp::Ordering;
use core::{fmt, mem};

mod sealed {
    use super::*;

    /// Not only does this seal the `MaskElement` trait, but these functions prevent other traits
    /// from bleeding into the parent bounds.
    ///
    /// For example, `eq` could be provided by requiring `MaskElement: PartialEq`, but that would
    /// prevent us from ever removing that bound, or from implementing `MaskElement` on
    /// non-`PartialEq` types in the future.
    pub trait Sealed {
        fn valid<const N: usize>(values: Simd<Self, N>) -> bool
        where
            LaneCount<N>: SupportedLaneCount,
            Self: SimdElement;

        fn eq(self, other: Self) -> bool;

        fn to_usize(self) -> usize;
        fn max_unsigned() -> u64;

        type Unsigned: SimdElement;

        const TRUE: Self;

        const FALSE: Self;
    }
}
use sealed::Sealed;

/// Marker trait for types that may be used as SIMD mask elements.
///
/// # Safety
/// Type must be a signed integer.
pub unsafe trait MaskElement: SimdElement<Mask = Self> + SimdCast + Sealed {}

macro_rules! impl_element {
    { $ty:ty, $unsigned:ty } => {
        impl Sealed for $ty {
            #[inline]
            fn valid<const N: usize>(value: Simd<Self, N>) -> bool
            where
                LaneCount<N>: SupportedLaneCount,
            {
                // We can't use `Simd` directly, because `Simd`'s functions call this function and
                // we will end up with an infinite loop.
                // Safety: `value` is an integer vector
                unsafe {
                    use core::intrinsics::simd;
                    let falses: Simd<Self, N> = simd::simd_eq(value, Simd::splat(0 as _));
                    let trues: Simd<Self, N> = simd::simd_eq(value, Simd::splat(-1 as _));
                    let valid: Simd<Self, N> = simd::simd_or(falses, trues);
                    simd::simd_reduce_all(valid)
                }
            }

            #[inline]
            fn eq(self, other: Self) -> bool { self == other }

            #[inline]
            fn to_usize(self) -> usize {
                self as usize
            }

            #[inline]
            fn max_unsigned() -> u64 {
                <$unsigned>::MAX as u64
            }

            type Unsigned = $unsigned;

            const TRUE: Self = -1;
            const FALSE: Self = 0;
        }

        // Safety: this is a valid mask element type
        unsafe impl MaskElement for $ty {}
    }
}

impl_element! { i8, u8 }
impl_element! { i16, u16 }
impl_element! { i32, u32 }
impl_element! { i64, u64 }
impl_element! { isize, usize }
/// A SIMD vector mask for `N` elements of width specified by the element type `T`.
///
/// Masks represent boolean inclusion/exclusion on a per-element basis.
///
/// The layout of this type is unspecified, and may change between platforms
/// and/or Rust versions, and code should not assume that it is equivalent to
/// `[T; N]`.
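///
/// A short usage sketch; it relies on `Mask::select` and the `f32x4`/`mask32x4` aliases,
/// which are provided elsewhere in this crate:
/// ```
/// # #![feature(portable_simd)]
/// # #[cfg(feature = "as_crate")] use core_simd::simd;
/// # #[cfg(not(feature = "as_crate"))] use core::simd;
/// # use simd::{f32x4, mask32x4};
/// let a = f32x4::from_array([1.0, 2.0, 3.0, 4.0]);
/// let b = f32x4::splat(0.0);
/// let keep = mask32x4::from_array([true, false, true, false]);
/// // Elements where the mask is set come from `a`, the rest from `b`.
/// assert_eq!(keep.select(a, b), f32x4::from_array([1.0, 0.0, 3.0, 0.0]));
/// ```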
#[repr(transparent)]
pub struct Mask<T, const N: usize>(mask_impl::Mask<T, N>)
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount;

impl<T, const N: usize> Copy for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
}

impl<T, const N: usize> Clone for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}

impl<T, const N: usize> Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    /// Construct a mask by setting all elements to the given value.
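    ///
    /// A minimal sketch, using the `mask32x8` alias as the other doctests in this module do:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// let mask = mask32x8::splat(true);
    /// assert!(mask.all());
    /// ```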
    #[inline]
    pub fn splat(value: bool) -> Self {
        Self(mask_impl::Mask::splat(value))
    }

    /// Converts an array of bools to a SIMD mask.
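    ///
    /// For example (again with the `mask32x8` alias):
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// let mask = mask32x8::from_array([true, false, true, false, false, false, true, false]);
    /// assert!(mask.test(0));
    /// assert!(!mask.test(1));
    /// ```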
    #[inline]
    pub fn from_array(array: [bool; N]) -> Self {
        // SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
        //     true:    0b_0000_0001
        //     false:   0b_0000_0000
        // Thus, an array of bools is also a valid array of bytes: [u8; N]
        // This would be hypothetically valid as an "in-place" transmute,
        // but these are "dependently-sized" types, so copy elision it is!
        unsafe {
            let bytes: [u8; N] = mem::transmute_copy(&array);
            let bools: Simd<i8, N> =
                core::intrinsics::simd::simd_ne(Simd::from_array(bytes), Simd::splat(0u8));
            Mask::from_int_unchecked(core::intrinsics::simd::simd_cast(bools))
        }
    }

    /// Converts a SIMD mask to an array of bools.
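    ///
    /// A round-trip sketch via [`Self::from_array`]:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// let bools = [true, false, true, false, false, false, true, false];
    /// assert_eq!(mask32x8::from_array(bools).to_array(), bools);
    /// ```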
    #[inline]
    pub fn to_array(self) -> [bool; N] {
        // This follows mostly the same logic as from_array.
        // SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
        //     true:    0b_0000_0001
        //     false:   0b_0000_0000
        // Thus, an array of bools is also a valid array of bytes: [u8; N]
        // Since our masks are equal to integers where all bits are set,
        // we can simply convert them to i8s, and then bitand them by the
        // bitpattern for Rust's "true" bool.
        // This would be hypothetically valid as an "in-place" transmute,
        // but these are "dependently-sized" types, so copy elision it is!
        unsafe {
            let mut bytes: Simd<i8, N> = core::intrinsics::simd::simd_cast(self.to_int());
            bytes &= Simd::splat(1i8);
            mem::transmute_copy(&bytes)
        }
    }

    /// Converts a vector of integers to a mask, where 0 represents `false` and -1
    /// represents `true`.
    ///
    /// # Safety
    /// All elements must be either 0 or -1.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub unsafe fn from_int_unchecked(value: Simd<T, N>) -> Self {
        // Safety: the caller must confirm this invariant
        unsafe {
            core::intrinsics::assume(<T as Sealed>::valid(value));
            Self(mask_impl::Mask::from_int_unchecked(value))
        }
    }

    /// Converts a vector of integers to a mask, where 0 represents `false` and -1
    /// represents `true`.
    ///
    /// # Panics
    /// Panics if any element is not 0 or -1.
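    ///
    /// A sketch using the `i32x8` and `mask32x8` aliases:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{i32x8, mask32x8};
    /// let ints = i32x8::from_array([-1, 0, -1, 0, 0, 0, -1, 0]);
    /// let mask = mask32x8::from_int(ints);
    /// assert_eq!(mask.to_array(), [true, false, true, false, false, false, true, false]);
    /// ```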
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    #[track_caller]
    pub fn from_int(value: Simd<T, N>) -> Self {
        assert!(T::valid(value), "all values must be either 0 or -1",);
        // Safety: the validity has been checked
        unsafe { Self::from_int_unchecked(value) }
    }

    /// Converts the mask to a vector of integers, where 0 represents `false` and -1
    /// represents `true`.
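    ///
    /// For example, set elements map to `-1` and unset elements to `0`:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{i32x8, mask32x8};
    /// let mask = mask32x8::from_array([true, false, true, false, false, false, true, false]);
    /// assert_eq!(
    ///     mask.to_int(),
    ///     i32x8::from_array([-1, 0, -1, 0, 0, 0, -1, 0]),
    /// );
    /// ```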
    #[inline]
    #[must_use = "method returns a new vector and does not mutate the original value"]
    pub fn to_int(self) -> Simd<T, N> {
        self.0.to_int()
    }

    /// Converts the mask to a mask of any other element size.
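    ///
    /// A sketch converting to a different element width; it assumes the `mask16x8` alias,
    /// mirroring `mask32x8`:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{mask16x8, mask32x8};
    /// let wide = mask32x8::from_array([true, false, true, false, false, false, true, false]);
    /// let narrow: mask16x8 = wide.cast();
    /// assert_eq!(narrow.to_array(), wide.to_array());
    /// ```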
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn cast<U: MaskElement>(self) -> Mask<U, N> {
        Mask(self.0.convert())
    }

    /// Tests the value of the specified element.
    ///
    /// # Safety
    /// `index` must be less than `self.len()`.
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub unsafe fn test_unchecked(&self, index: usize) -> bool {
        // Safety: the caller must confirm this invariant
        unsafe { self.0.test_unchecked(index) }
    }

    /// Tests the value of the specified element.
    ///
    /// # Panics
    /// Panics if `index` is greater than or equal to the number of elements in the vector.
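    ///
    /// For example:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// let mask = mask32x8::from_array([false, true, false, false, false, false, false, false]);
    /// assert!(!mask.test(0));
    /// assert!(mask.test(1));
    /// ```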
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    #[track_caller]
    pub fn test(&self, index: usize) -> bool {
        assert!(index < N, "element index out of range");
        // Safety: the element index has been checked
        unsafe { self.test_unchecked(index) }
    }

    /// Sets the value of the specified element.
    ///
    /// # Safety
    /// `index` must be less than `self.len()`.
    #[inline]
    pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
        // Safety: the caller must confirm this invariant
        unsafe {
            self.0.set_unchecked(index, value);
        }
    }

    /// Sets the value of the specified element.
    ///
    /// # Panics
    /// Panics if `index` is greater than or equal to the number of elements in the vector.
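    ///
    /// For example:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// let mut mask = mask32x8::splat(false);
    /// mask.set(2, true);
    /// assert_eq!(mask.to_array(), [false, false, true, false, false, false, false, false]);
    /// ```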
    #[inline]
    #[track_caller]
    pub fn set(&mut self, index: usize, value: bool) {
        assert!(index < N, "element index out of range");
        // Safety: the element index has been checked
        unsafe {
            self.set_unchecked(index, value);
        }
    }

    /// Returns true if any element is set, or false otherwise.
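    ///
    /// For example:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// assert!(!mask32x8::splat(false).any());
    /// assert!(mask32x8::from_array([false, false, true, false, false, false, false, false]).any());
    /// ```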
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub fn any(self) -> bool {
        self.0.any()
    }

    /// Returns true if all elements are set, or false otherwise.
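    ///
    /// For example:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// assert!(mask32x8::splat(true).all());
    /// assert!(!mask32x8::from_array([true, true, true, true, true, true, true, false]).all());
    /// ```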
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub fn all(self) -> bool {
        self.0.all()
    }

    /// Create a bitmask from a mask.
    ///
    /// Each bit is set if the corresponding element in the mask is `true`.
    /// If the mask contains more than 64 elements, the bitmask is truncated to the first 64.
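    ///
    /// A sketch; element 0 corresponds to the least significant bit, matching the
    /// [`Self::to_bitmask_vector`] example below:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// let mask = mask32x8::from_array([true, false, true, false, false, false, true, false]);
    /// assert_eq!(mask.to_bitmask(), 0b01000101);
    /// ```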
    #[inline]
    #[must_use = "method returns a new integer and does not mutate the original value"]
    pub fn to_bitmask(self) -> u64 {
        self.0.to_bitmask_integer()
    }

    /// Create a mask from a bitmask.
    ///
    /// For each bit, if it is set, the corresponding element in the mask is set to `true`.
    /// If the mask contains more than 64 elements, the remainder are set to `false`.
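    ///
    /// A sketch of the round trip with [`Self::to_bitmask`]:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// assert_eq!(
    ///     mask32x8::from_bitmask(0b01000101),
    ///     mask32x8::from_array([true, false, true, false, false, false, true, false]),
    /// );
    /// ```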
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn from_bitmask(bitmask: u64) -> Self {
        Self(mask_impl::Mask::from_bitmask_integer(bitmask))
    }

    /// Create a bitmask vector from a mask.
    ///
    /// Each bit is set if the corresponding element in the mask is `true`.
    /// The remaining bits are unset.
    ///
    /// The bits are packed into the first N bits of the vector:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// let mask = mask32x8::from_array([true, false, true, false, false, false, true, false]);
    /// assert_eq!(mask.to_bitmask_vector()[0], 0b01000101);
    /// ```
    #[inline]
    #[must_use = "method returns a new integer and does not mutate the original value"]
    pub fn to_bitmask_vector(self) -> Simd<u8, N> {
        self.0.to_bitmask_vector()
    }

    /// Create a mask from a bitmask vector.
    ///
    /// For each bit, if it is set, the corresponding element in the mask is set to `true`.
    ///
    /// The bits are packed into the first N bits of the vector:
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::{mask32x8, u8x8};
    /// let bitmask = u8x8::from_array([0b01000101, 0, 0, 0, 0, 0, 0, 0]);
    /// assert_eq!(
    ///     mask32x8::from_bitmask_vector(bitmask),
    ///     mask32x8::from_array([true, false, true, false, false, false, true, false]),
    /// );
    /// ```
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn from_bitmask_vector(bitmask: Simd<u8, N>) -> Self {
        Self(mask_impl::Mask::from_bitmask_vector(bitmask))
    }

    /// Find the index of the first set element.
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "as_crate")] use core_simd::simd;
    /// # #[cfg(not(feature = "as_crate"))] use core::simd;
    /// # use simd::mask32x8;
    /// assert_eq!(mask32x8::splat(false).first_set(), None);
    /// assert_eq!(mask32x8::splat(true).first_set(), Some(0));
    ///
    /// let mask = mask32x8::from_array([false, true, false, false, true, false, false, true]);
    /// assert_eq!(mask.first_set(), Some(1));
    /// ```
    #[inline]
    #[must_use = "method returns the index and does not mutate the original value"]
    pub fn first_set(self) -> Option<usize> {
        // If bitmasks are efficient, using them is better
        if cfg!(target_feature = "sse") && N <= 64 {
            let tz = self.to_bitmask().trailing_zeros();
            return if tz == 64 { None } else { Some(tz as usize) };
        }

        // To find the first set index:
        // * create a vector 0..N
        // * replace unset mask elements in that vector with -1
        // * perform _unsigned_ reduce-min
        // * check if the result is -1 or an index

        let index = Simd::from_array(
            const {
                let mut index = [0; N];
                let mut i = 0;
                while i < N {
                    index[i] = i;
                    i += 1;
                }
                index
            },
        );

        // Safety: the input and output are integer vectors
        let index: Simd<T, N> = unsafe { core::intrinsics::simd::simd_cast(index) };

        let masked_index = self.select(index, Self::splat(true).to_int());

        // Safety: the input and output are integer vectors
        let masked_index: Simd<T::Unsigned, N> =
            unsafe { core::intrinsics::simd::simd_cast(masked_index) };

        // Safety: the input is an integer vector
        let min_index: T::Unsigned =
            unsafe { core::intrinsics::simd::simd_reduce_min(masked_index) };

        // Safety: the return value is the unsigned version of T
        let min_index: T = unsafe { core::mem::transmute_copy(&min_index) };

        if min_index.eq(T::TRUE) {
            None
        } else {
            Some(min_index.to_usize())
        }
    }
}

// vector/array conversion
impl<T, const N: usize> From<[bool; N]> for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn from(array: [bool; N]) -> Self {
        Self::from_array(array)
    }
}

impl<T, const N: usize> From<Mask<T, N>> for [bool; N]
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn from(vector: Mask<T, N>) -> Self {
        vector.to_array()
    }
}

impl<T, const N: usize> Default for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    #[must_use = "method returns a defaulted mask with all elements set to false (0)"]
    fn default() -> Self {
        Self::splat(false)
    }
}

impl<T, const N: usize> PartialEq for Mask<T, N>
where
    T: MaskElement + PartialEq,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl<T, const N: usize> PartialOrd for Mask<T, N>
where
    T: MaskElement + PartialOrd,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    #[must_use = "method returns a new Ordering and does not mutate the original value"]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.0.partial_cmp(&other.0)
    }
}

impl<T, const N: usize> fmt::Debug for Mask<T, N>
where
    T: MaskElement + fmt::Debug,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_list()
            .entries((0..N).map(|i| self.test(i)))
            .finish()
    }
}

impl<T, const N: usize> core::ops::BitAnd for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Self;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitand(self, rhs: Self) -> Self {
        Self(self.0 & rhs.0)
    }
}

impl<T, const N: usize> core::ops::BitAnd<bool> for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Self;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitand(self, rhs: bool) -> Self {
        self & Self::splat(rhs)
    }
}

impl<T, const N: usize> core::ops::BitAnd<Mask<T, N>> for bool
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Mask<T, N>;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitand(self, rhs: Mask<T, N>) -> Mask<T, N> {
        Mask::splat(self) & rhs
    }
}

impl<T, const N: usize> core::ops::BitOr for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Self;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitor(self, rhs: Self) -> Self {
        Self(self.0 | rhs.0)
    }
}

impl<T, const N: usize> core::ops::BitOr<bool> for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Self;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitor(self, rhs: bool) -> Self {
        self | Self::splat(rhs)
    }
}

impl<T, const N: usize> core::ops::BitOr<Mask<T, N>> for bool
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Mask<T, N>;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitor(self, rhs: Mask<T, N>) -> Mask<T, N> {
        Mask::splat(self) | rhs
    }
}

impl<T, const N: usize> core::ops::BitXor for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Self;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitxor(self, rhs: Self) -> Self::Output {
        Self(self.0 ^ rhs.0)
    }
}

impl<T, const N: usize> core::ops::BitXor<bool> for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Self;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitxor(self, rhs: bool) -> Self::Output {
        self ^ Self::splat(rhs)
    }
}

impl<T, const N: usize> core::ops::BitXor<Mask<T, N>> for bool
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Mask<T, N>;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn bitxor(self, rhs: Mask<T, N>) -> Self::Output {
        Mask::splat(self) ^ rhs
    }
}

impl<T, const N: usize> core::ops::Not for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    type Output = Mask<T, N>;
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

impl<T, const N: usize> core::ops::BitAndAssign for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn bitand_assign(&mut self, rhs: Self) {
        self.0 = self.0 & rhs.0;
    }
}

impl<T, const N: usize> core::ops::BitAndAssign<bool> for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn bitand_assign(&mut self, rhs: bool) {
        *self &= Self::splat(rhs);
    }
}

impl<T, const N: usize> core::ops::BitOrAssign for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn bitor_assign(&mut self, rhs: Self) {
        self.0 = self.0 | rhs.0;
    }
}

impl<T, const N: usize> core::ops::BitOrAssign<bool> for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn bitor_assign(&mut self, rhs: bool) {
        *self |= Self::splat(rhs);
    }
}

impl<T, const N: usize> core::ops::BitXorAssign for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn bitxor_assign(&mut self, rhs: Self) {
        self.0 = self.0 ^ rhs.0;
    }
}

impl<T, const N: usize> core::ops::BitXorAssign<bool> for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    #[inline]
    fn bitxor_assign(&mut self, rhs: bool) {
        *self ^= Self::splat(rhs);
    }
}

macro_rules! impl_from {
    { $from:ty => $($to:ty),* } => {
        $(
        impl<const N: usize> From<Mask<$from, N>> for Mask<$to, N>
        where
            LaneCount<N>: SupportedLaneCount,
        {
            #[inline]
            fn from(value: Mask<$from, N>) -> Self {
                value.cast()
            }
        }
        )*
    }
}
impl_from! { i8 => i16, i32, i64, isize }
impl_from! { i16 => i32, i64, isize, i8 }
impl_from! { i32 => i64, isize, i8, i16 }
impl_from! { i64 => isize, i8, i16, i32 }
impl_from! { isize => i8, i16, i32, i64 }