use crate::core_arch::{simd::*, x86::*};
use crate::intrinsics::simd::*;

#[cfg(test)]
use stdarch_test::assert_instr;

/// Broadcast the low 16 bits from input mask k to all 32-bit elements of dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastmw_epi32&expand=553)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d
pub fn _mm512_broadcastmw_epi32(k: __mmask16) -> __m512i {
    _mm512_set1_epi32(k as i32)
}

/// Broadcast the low 16 bits from input mask k to all 32-bit elements of dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastmw_epi32&expand=552)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d
pub fn _mm256_broadcastmw_epi32(k: __mmask16) -> __m256i {
    _mm256_set1_epi32(k as i32)
}

/// Broadcast the low 16 bits from input mask k to all 32-bit elements of dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastmw_epi32&expand=551)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmw2d
pub fn _mm_broadcastmw_epi32(k: __mmask16) -> __m128i {
    _mm_set1_epi32(k as i32)
}

/// Broadcast the low 8 bits from input mask k to all 64-bit elements of dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_broadcastmb_epi64&expand=550)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q
pub fn _mm512_broadcastmb_epi64(k: __mmask8) -> __m512i {
    _mm512_set1_epi64(k as i64)
}

/// Broadcast the low 8 bits from input mask k to all 64-bit elements of dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_broadcastmb_epi64&expand=549)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q
pub fn _mm256_broadcastmb_epi64(k: __mmask8) -> __m256i {
    _mm256_set1_epi64x(k as i64)
}

/// Broadcast the low 8 bits from input mask k to all 64-bit elements of dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_broadcastmb_epi64&expand=548)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpbroadcast))] // should be vpbroadcastmb2q
pub fn _mm_broadcastmb_epi64(k: __mmask8) -> __m128i {
    _mm_set1_epi64x(k as i64)
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit. Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_conflict_epi32&expand=1248)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm512_conflict_epi32(a: __m512i) -> __m512i {
    unsafe { transmute(vpconflictd(a.as_i32x16())) }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_conflict_epi32&expand=1249)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm512_mask_conflict_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i {
    unsafe {
        let conflict = _mm512_conflict_epi32(a).as_i32x16();
        transmute(simd_select_bitmask(k, conflict, src.as_i32x16()))
    }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_conflict_epi32&expand=1250)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm512_maskz_conflict_epi32(k: __mmask16, a: __m512i) -> __m512i {
    unsafe {
        let conflict = _mm512_conflict_epi32(a).as_i32x16();
        transmute(simd_select_bitmask(k, conflict, i32x16::ZERO))
    }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit. Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_conflict_epi32&expand=1245)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm256_conflict_epi32(a: __m256i) -> __m256i {
    unsafe { transmute(vpconflictd256(a.as_i32x8())) }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_conflict_epi32&expand=1246)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm256_mask_conflict_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let conflict = _mm256_conflict_epi32(a).as_i32x8();
        transmute(simd_select_bitmask(k, conflict, src.as_i32x8()))
    }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_conflict_epi32&expand=1247)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm256_maskz_conflict_epi32(k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let conflict = _mm256_conflict_epi32(a).as_i32x8();
        transmute(simd_select_bitmask(k, conflict, i32x8::ZERO))
    }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit. Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_conflict_epi32&expand=1242)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm_conflict_epi32(a: __m128i) -> __m128i {
    unsafe { transmute(vpconflictd128(a.as_i32x4())) }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_conflict_epi32&expand=1243)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm_mask_conflict_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let conflict = _mm_conflict_epi32(a).as_i32x4();
        transmute(simd_select_bitmask(k, conflict, src.as_i32x4()))
    }
}

/// Test each 32-bit element of a for equality with all other elements in a closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_conflict_epi32&expand=1244)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictd))]
pub fn _mm_maskz_conflict_epi32(k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let conflict = _mm_conflict_epi32(a).as_i32x4();
        transmute(simd_select_bitmask(k, conflict, i32x4::ZERO))
    }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit. Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_conflict_epi64&expand=1257)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm512_conflict_epi64(a: __m512i) -> __m512i {
    unsafe { transmute(vpconflictq(a.as_i64x8())) }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_conflict_epi64&expand=1258)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm512_mask_conflict_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i {
    unsafe {
        let conflict = _mm512_conflict_epi64(a).as_i64x8();
        transmute(simd_select_bitmask(k, conflict, src.as_i64x8()))
    }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_conflict_epi64&expand=1259)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm512_maskz_conflict_epi64(k: __mmask8, a: __m512i) -> __m512i {
    unsafe {
        let conflict = _mm512_conflict_epi64(a).as_i64x8();
        transmute(simd_select_bitmask(k, conflict, i64x8::ZERO))
    }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit. Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_conflict_epi64&expand=1254)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm256_conflict_epi64(a: __m256i) -> __m256i {
    unsafe { transmute(vpconflictq256(a.as_i64x4())) }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_conflict_epi64&expand=1255)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm256_mask_conflict_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let conflict = _mm256_conflict_epi64(a).as_i64x4();
        transmute(simd_select_bitmask(k, conflict, src.as_i64x4()))
    }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_conflict_epi64&expand=1256)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm256_maskz_conflict_epi64(k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let conflict = _mm256_conflict_epi64(a).as_i64x4();
        transmute(simd_select_bitmask(k, conflict, i64x4::ZERO))
    }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit. Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_conflict_epi64&expand=1251)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm_conflict_epi64(a: __m128i) -> __m128i {
    unsafe { transmute(vpconflictq128(a.as_i64x2())) }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit using writemask k (elements are copied from src when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_conflict_epi64&expand=1252)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm_mask_conflict_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let conflict = _mm_conflict_epi64(a).as_i64x2();
        transmute(simd_select_bitmask(k, conflict, src.as_i64x2()))
    }
}

/// Test each 64-bit element of a for equality with all other elements in a closer to the least significant bit using zeromask k (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_conflict_epi64&expand=1253)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vpconflictq))]
pub fn _mm_maskz_conflict_epi64(k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let conflict = _mm_conflict_epi64(a).as_i64x2();
        transmute(simd_select_bitmask(k, conflict, i64x2::ZERO))
    }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_lzcnt_epi32&expand=3491)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm512_lzcnt_epi32(a: __m512i) -> __m512i {
    unsafe { transmute(simd_ctlz(a.as_i32x16())) }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_lzcnt_epi32&expand=3492)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm512_mask_lzcnt_epi32(src: __m512i, k: __mmask16, a: __m512i) -> __m512i {
    unsafe {
        let zerocount = _mm512_lzcnt_epi32(a).as_i32x16();
        transmute(simd_select_bitmask(k, zerocount, src.as_i32x16()))
    }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_lzcnt_epi32&expand=3493)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm512_maskz_lzcnt_epi32(k: __mmask16, a: __m512i) -> __m512i {
    unsafe {
        let zerocount = _mm512_lzcnt_epi32(a).as_i32x16();
        transmute(simd_select_bitmask(k, zerocount, i32x16::ZERO))
    }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_lzcnt_epi32&expand=3488)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm256_lzcnt_epi32(a: __m256i) -> __m256i {
    unsafe { transmute(simd_ctlz(a.as_i32x8())) }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_lzcnt_epi32&expand=3489)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm256_mask_lzcnt_epi32(src: __m256i, k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let zerocount = _mm256_lzcnt_epi32(a).as_i32x8();
        transmute(simd_select_bitmask(k, zerocount, src.as_i32x8()))
    }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_lzcnt_epi32&expand=3490)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm256_maskz_lzcnt_epi32(k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let zerocount = _mm256_lzcnt_epi32(a).as_i32x8();
        transmute(simd_select_bitmask(k, zerocount, i32x8::ZERO))
    }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lzcnt_epi32&expand=3485)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm_lzcnt_epi32(a: __m128i) -> __m128i {
    unsafe { transmute(simd_ctlz(a.as_i32x4())) }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_lzcnt_epi32&expand=3486)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm_mask_lzcnt_epi32(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let zerocount = _mm_lzcnt_epi32(a).as_i32x4();
        transmute(simd_select_bitmask(k, zerocount, src.as_i32x4()))
    }
}

/// Counts the number of leading zero bits in each packed 32-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_lzcnt_epi32&expand=3487)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntd))]
pub fn _mm_maskz_lzcnt_epi32(k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let zerocount = _mm_lzcnt_epi32(a).as_i32x4();
        transmute(simd_select_bitmask(k, zerocount, i32x4::ZERO))
    }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_lzcnt_epi64&expand=3500)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm512_lzcnt_epi64(a: __m512i) -> __m512i {
    unsafe { transmute(simd_ctlz(a.as_i64x8())) }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_mask_lzcnt_epi64&expand=3501)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm512_mask_lzcnt_epi64(src: __m512i, k: __mmask8, a: __m512i) -> __m512i {
    unsafe {
        let zerocount = _mm512_lzcnt_epi64(a).as_i64x8();
        transmute(simd_select_bitmask(k, zerocount, src.as_i64x8()))
    }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm512_maskz_lzcnt_epi64&expand=3502)
#[inline]
#[target_feature(enable = "avx512cd")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm512_maskz_lzcnt_epi64(k: __mmask8, a: __m512i) -> __m512i {
    unsafe {
        let zerocount = _mm512_lzcnt_epi64(a).as_i64x8();
        transmute(simd_select_bitmask(k, zerocount, i64x8::ZERO))
    }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_lzcnt_epi64&expand=3497)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm256_lzcnt_epi64(a: __m256i) -> __m256i {
    unsafe { transmute(simd_ctlz(a.as_i64x4())) }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_mask_lzcnt_epi64&expand=3498)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm256_mask_lzcnt_epi64(src: __m256i, k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let zerocount = _mm256_lzcnt_epi64(a).as_i64x4();
        transmute(simd_select_bitmask(k, zerocount, src.as_i64x4()))
    }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm256_maskz_lzcnt_epi64&expand=3499)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm256_maskz_lzcnt_epi64(k: __mmask8, a: __m256i) -> __m256i {
    unsafe {
        let zerocount = _mm256_lzcnt_epi64(a).as_i64x4();
        transmute(simd_select_bitmask(k, zerocount, i64x4::ZERO))
    }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lzcnt_epi64&expand=3494)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm_lzcnt_epi64(a: __m128i) -> __m128i {
    unsafe { transmute(simd_ctlz(a.as_i64x2())) }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mask_lzcnt_epi64&expand=3495)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm_mask_lzcnt_epi64(src: __m128i, k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let zerocount = _mm_lzcnt_epi64(a).as_i64x2();
        transmute(simd_select_bitmask(k, zerocount, src.as_i64x2()))
    }
}

/// Counts the number of leading zero bits in each packed 64-bit integer in a, and stores the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskz_lzcnt_epi64&expand=3496)
#[inline]
#[target_feature(enable = "avx512cd,avx512vl")]
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
#[cfg_attr(test, assert_instr(vplzcntq))]
pub fn _mm_maskz_lzcnt_epi64(k: __mmask8, a: __m128i) -> __m128i {
    unsafe {
        let zerocount = _mm_lzcnt_epi64(a).as_i64x2();
        transmute(simd_select_bitmask(k, zerocount, i64x2::ZERO))
    }
}

#[allow(improper_ctypes)]
unsafe extern "C" {
    #[link_name = "llvm.x86.avx512.conflict.d.512"]
    unsafe fn vpconflictd(a: i32x16) -> i32x16;
    #[link_name = "llvm.x86.avx512.conflict.d.256"]
    unsafe fn vpconflictd256(a: i32x8) -> i32x8;
    #[link_name = "llvm.x86.avx512.conflict.d.128"]
    unsafe fn vpconflictd128(a: i32x4) -> i32x4;

    #[link_name = "llvm.x86.avx512.conflict.q.512"]
    unsafe fn vpconflictq(a: i64x8) -> i64x8;
    #[link_name = "llvm.x86.avx512.conflict.q.256"]
    unsafe fn vpconflictq256(a: i64x4) -> i64x4;
    #[link_name = "llvm.x86.avx512.conflict.q.128"]
    unsafe fn vpconflictq128(a: i64x2) -> i64x2;
}

#[cfg(test)]
mod tests {

    use crate::core_arch::x86::*;
    use stdarch_test::simd_test;

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_broadcastmw_epi32() {
        let a: __mmask16 = 2;
        let r = _mm512_broadcastmw_epi32(a);
        let e = _mm512_set1_epi32(2);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_broadcastmw_epi32() {
        let a: __mmask16 = 2;
        let r = _mm256_broadcastmw_epi32(a);
        let e = _mm256_set1_epi32(2);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_broadcastmw_epi32() {
        let a: __mmask16 = 2;
        let r = _mm_broadcastmw_epi32(a);
        let e = _mm_set1_epi32(2);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_broadcastmb_epi64() {
        let a: __mmask8 = 2;
        let r = _mm512_broadcastmb_epi64(a);
        let e = _mm512_set1_epi64(2);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_broadcastmb_epi64() {
        let a: __mmask8 = 2;
        let r = _mm256_broadcastmb_epi64(a);
        let e = _mm256_set1_epi64x(2);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_broadcastmb_epi64() {
        let a: __mmask8 = 2;
        let r = _mm_broadcastmb_epi64(a);
        let e = _mm_set1_epi64x(2);
        assert_eq_m128i(r, e);
    }
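
    // Illustrative sketch (an added check, not from Intel's test vectors): the
    // broadcast zero-extends the mask, so an all-ones 16-bit mask becomes the
    // positive value 0xFFFF in every 32-bit lane, not -1.
    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_broadcastmw_epi32_zero_extends() {
        let k: __mmask16 = 0xFFFF;
        let r = _mm_broadcastmw_epi32(k);
        let e = _mm_set1_epi32(0xFFFF);
        assert_eq_m128i(r, e);
    }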

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_conflict_epi32() {
        let a = _mm512_set1_epi32(1);
        let r = _mm512_conflict_epi32(a);
        // All lanes are equal, so lane i reports a conflict with every lane
        // below it: bits 0..i set, i.e. (1 << i) - 1.
        let e = _mm512_set_epi32(
            (1 << 15) - 1,
            (1 << 14) - 1,
            (1 << 13) - 1,
            (1 << 12) - 1,
            (1 << 11) - 1,
            (1 << 10) - 1,
            (1 << 9) - 1,
            (1 << 8) - 1,
            (1 << 7) - 1,
            (1 << 6) - 1,
            (1 << 5) - 1,
            (1 << 4) - 1,
            (1 << 3) - 1,
            (1 << 2) - 1,
            (1 << 1) - 1,
            0,
        );
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_mask_conflict_epi32() {
        let a = _mm512_set1_epi32(1);
        let r = _mm512_mask_conflict_epi32(a, 0, a);
        assert_eq_m512i(r, a);
        let r = _mm512_mask_conflict_epi32(a, 0b11111111_11111111, a);
        // Same expectation as the unmasked case: lane i holds (1 << i) - 1.
        let e = _mm512_set_epi32(
            (1 << 15) - 1,
            (1 << 14) - 1,
            (1 << 13) - 1,
            (1 << 12) - 1,
            (1 << 11) - 1,
            (1 << 10) - 1,
            (1 << 9) - 1,
            (1 << 8) - 1,
            (1 << 7) - 1,
            (1 << 6) - 1,
            (1 << 5) - 1,
            (1 << 4) - 1,
            (1 << 3) - 1,
            (1 << 2) - 1,
            (1 << 1) - 1,
            0,
        );
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_maskz_conflict_epi32() {
        let a = _mm512_set1_epi32(1);
        let r = _mm512_maskz_conflict_epi32(0, a);
        assert_eq_m512i(r, _mm512_setzero_si512());
        let r = _mm512_maskz_conflict_epi32(0b11111111_11111111, a);
        // Same expectation as the unmasked case: lane i holds (1 << i) - 1.
        let e = _mm512_set_epi32(
            (1 << 15) - 1,
            (1 << 14) - 1,
            (1 << 13) - 1,
            (1 << 12) - 1,
            (1 << 11) - 1,
            (1 << 10) - 1,
            (1 << 9) - 1,
            (1 << 8) - 1,
            (1 << 7) - 1,
            (1 << 6) - 1,
            (1 << 5) - 1,
            (1 << 4) - 1,
            (1 << 3) - 1,
            (1 << 2) - 1,
            (1 << 1) - 1,
            0,
        );
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_conflict_epi32() {
        let a = _mm256_set1_epi32(1);
        let r = _mm256_conflict_epi32(a);
        let e = _mm256_set_epi32(
            1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 2 | 1 << 1 | 1 << 0,
            1 << 1 | 1 << 0,
            1 << 0,
            0,
        );
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_mask_conflict_epi32() {
        let a = _mm256_set1_epi32(1);
        let r = _mm256_mask_conflict_epi32(a, 0, a);
        assert_eq_m256i(r, a);
        let r = _mm256_mask_conflict_epi32(a, 0b11111111, a);
        let e = _mm256_set_epi32(
            1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 2 | 1 << 1 | 1 << 0,
            1 << 1 | 1 << 0,
            1 << 0,
            0,
        );
        assert_eq_m256i(r, e);
    }
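
    // Minimal sketch of the writemask blend (an added case, not from Intel's
    // test vectors): even lanes take the conflict result, odd lanes keep src.
    // For set1(1), lane i of the conflict result is (1 << i) - 1.
    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_mask_conflict_epi32_partial_mask() {
        let a = _mm256_set1_epi32(1);
        let src = _mm256_set1_epi32(-1);
        let r = _mm256_mask_conflict_epi32(src, 0b01010101, a);
        let e = _mm256_set_epi32(-1, 63, -1, 15, -1, 3, -1, 0);
        assert_eq_m256i(r, e);
    }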

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_maskz_conflict_epi32() {
        let a = _mm256_set1_epi32(1);
        let r = _mm256_maskz_conflict_epi32(0, a);
        assert_eq_m256i(r, _mm256_setzero_si256());
        let r = _mm256_maskz_conflict_epi32(0b11111111, a);
        let e = _mm256_set_epi32(
            1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 2 | 1 << 1 | 1 << 0,
            1 << 1 | 1 << 0,
            1 << 0,
            0,
        );
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_conflict_epi32() {
        let a = _mm_set1_epi32(1);
        let r = _mm_conflict_epi32(a);
        let e = _mm_set_epi32(1 << 2 | 1 << 1 | 1 << 0, 1 << 1 | 1 << 0, 1 << 0, 0);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_mask_conflict_epi32() {
        let a = _mm_set1_epi32(1);
        let r = _mm_mask_conflict_epi32(a, 0, a);
        assert_eq_m128i(r, a);
        let r = _mm_mask_conflict_epi32(a, 0b00001111, a);
        let e = _mm_set_epi32(1 << 2 | 1 << 1 | 1 << 0, 1 << 1 | 1 << 0, 1 << 0, 0);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_maskz_conflict_epi32() {
        let a = _mm_set1_epi32(1);
        let r = _mm_maskz_conflict_epi32(0, a);
        assert_eq_m128i(r, _mm_setzero_si128());
        let r = _mm_maskz_conflict_epi32(0b00001111, a);
        let e = _mm_set_epi32(1 << 2 | 1 << 1 | 1 << 0, 1 << 1 | 1 << 0, 1 << 0, 0);
        assert_eq_m128i(r, e);
    }
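
    // Illustrative sketch with non-uniform input (an added case, not from
    // Intel's test vectors): lane 3 repeats lane 1's value, so only bit 1 of
    // lane 3 is set; no other lane repeats an earlier value.
    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_conflict_epi32_mixed_values() {
        let a = _mm_set_epi32(1, 2, 1, 3); // lanes 3..0 hold 1, 2, 1, 3
        let r = _mm_conflict_epi32(a);
        let e = _mm_set_epi32(1 << 1, 0, 0, 0);
        assert_eq_m128i(r, e);
    }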

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_conflict_epi64() {
        let a = _mm512_set1_epi64(1);
        let r = _mm512_conflict_epi64(a);
        let e = _mm512_set_epi64(
            1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 2 | 1 << 1 | 1 << 0,
            1 << 1 | 1 << 0,
            1 << 0,
            0,
        );
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_mask_conflict_epi64() {
        let a = _mm512_set1_epi64(1);
        let r = _mm512_mask_conflict_epi64(a, 0, a);
        assert_eq_m512i(r, a);
        let r = _mm512_mask_conflict_epi64(a, 0b11111111, a);
        let e = _mm512_set_epi64(
            1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 2 | 1 << 1 | 1 << 0,
            1 << 1 | 1 << 0,
            1 << 0,
            0,
        );
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_maskz_conflict_epi64() {
        let a = _mm512_set1_epi64(1);
        let r = _mm512_maskz_conflict_epi64(0, a);
        assert_eq_m512i(r, _mm512_setzero_si512());
        let r = _mm512_maskz_conflict_epi64(0b11111111, a);
        let e = _mm512_set_epi64(
            1 << 6 | 1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 5 | 1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 4 | 1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 3 | 1 << 2 | 1 << 1 | 1 << 0,
            1 << 2 | 1 << 1 | 1 << 0,
            1 << 1 | 1 << 0,
            1 << 0,
            0,
        );
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_conflict_epi64() {
        let a = _mm256_set1_epi64x(1);
        let r = _mm256_conflict_epi64(a);
        let e = _mm256_set_epi64x(1 << 2 | 1 << 1 | 1 << 0, 1 << 1 | 1 << 0, 1 << 0, 0);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_mask_conflict_epi64() {
        let a = _mm256_set1_epi64x(1);
        let r = _mm256_mask_conflict_epi64(a, 0, a);
        assert_eq_m256i(r, a);
        let r = _mm256_mask_conflict_epi64(a, 0b00001111, a);
        let e = _mm256_set_epi64x(1 << 2 | 1 << 1 | 1 << 0, 1 << 1 | 1 << 0, 1 << 0, 0);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_maskz_conflict_epi64() {
        let a = _mm256_set1_epi64x(1);
        let r = _mm256_maskz_conflict_epi64(0, a);
        assert_eq_m256i(r, _mm256_setzero_si256());
        let r = _mm256_maskz_conflict_epi64(0b00001111, a);
        let e = _mm256_set_epi64x(1 << 2 | 1 << 1 | 1 << 0, 1 << 1 | 1 << 0, 1 << 0, 0);
        assert_eq_m256i(r, e);
    }
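
    // Illustrative sketch with duplicated values (an added case, not from
    // Intel's test vectors): lanes 3..0 hold 1, 2, 2, 1, so lane 2 conflicts
    // with lane 1 (bit 1) and lane 3 conflicts with lane 0 (bit 0).
    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_conflict_epi64_mixed_values() {
        let a = _mm256_set_epi64x(1, 2, 2, 1);
        let r = _mm256_conflict_epi64(a);
        let e = _mm256_set_epi64x(1 << 0, 1 << 1, 0, 0);
        assert_eq_m256i(r, e);
    }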

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_conflict_epi64() {
        let a = _mm_set1_epi64x(1);
        let r = _mm_conflict_epi64(a);
        let e = _mm_set_epi64x(1 << 0, 0);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_mask_conflict_epi64() {
        let a = _mm_set1_epi64x(1);
        let r = _mm_mask_conflict_epi64(a, 0, a);
        assert_eq_m128i(r, a);
        let r = _mm_mask_conflict_epi64(a, 0b00000011, a);
        let e = _mm_set_epi64x(1 << 0, 0);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_maskz_conflict_epi64() {
        let a = _mm_set1_epi64x(1);
        let r = _mm_maskz_conflict_epi64(0, a);
        assert_eq_m128i(r, _mm_setzero_si128());
        let r = _mm_maskz_conflict_epi64(0b00000011, a);
        let e = _mm_set_epi64x(1 << 0, 0);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_lzcnt_epi32() {
        let a = _mm512_set1_epi32(1);
        let r = _mm512_lzcnt_epi32(a);
        let e = _mm512_set1_epi32(31);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_mask_lzcnt_epi32() {
        let a = _mm512_set1_epi32(1);
        let r = _mm512_mask_lzcnt_epi32(a, 0, a);
        assert_eq_m512i(r, a);
        let r = _mm512_mask_lzcnt_epi32(a, 0b11111111_11111111, a);
        let e = _mm512_set1_epi32(31);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_maskz_lzcnt_epi32() {
        let a = _mm512_set1_epi32(2);
        let r = _mm512_maskz_lzcnt_epi32(0, a);
        assert_eq_m512i(r, _mm512_setzero_si512());
        let r = _mm512_maskz_lzcnt_epi32(0b11111111_11111111, a);
        let e = _mm512_set1_epi32(30);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_lzcnt_epi32() {
        let a = _mm256_set1_epi32(1);
        let r = _mm256_lzcnt_epi32(a);
        let e = _mm256_set1_epi32(31);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_mask_lzcnt_epi32() {
        let a = _mm256_set1_epi32(1);
        let r = _mm256_mask_lzcnt_epi32(a, 0, a);
        assert_eq_m256i(r, a);
        let r = _mm256_mask_lzcnt_epi32(a, 0b11111111, a);
        let e = _mm256_set1_epi32(31);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_maskz_lzcnt_epi32() {
        let a = _mm256_set1_epi32(1);
        let r = _mm256_maskz_lzcnt_epi32(0, a);
        assert_eq_m256i(r, _mm256_setzero_si256());
        let r = _mm256_maskz_lzcnt_epi32(0b11111111, a);
        let e = _mm256_set1_epi32(31);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_lzcnt_epi32() {
        let a = _mm_set1_epi32(1);
        let r = _mm_lzcnt_epi32(a);
        let e = _mm_set1_epi32(31);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_mask_lzcnt_epi32() {
        let a = _mm_set1_epi32(1);
        let r = _mm_mask_lzcnt_epi32(a, 0, a);
        assert_eq_m128i(r, a);
        let r = _mm_mask_lzcnt_epi32(a, 0b00001111, a);
        let e = _mm_set1_epi32(31);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_maskz_lzcnt_epi32() {
        let a = _mm_set1_epi32(1);
        let r = _mm_maskz_lzcnt_epi32(0, a);
        assert_eq_m128i(r, _mm_setzero_si128());
        let r = _mm_maskz_lzcnt_epi32(0b00001111, a);
        let e = _mm_set1_epi32(31);
        assert_eq_m128i(r, e);
    }
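
    // Edge-case sketch (an added check, not from Intel's test vectors):
    // lzcnt of 0 is the full element width (32), and lzcnt of a value with
    // the sign bit set is 0.
    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_lzcnt_epi32_edge_cases() {
        let a = _mm_set_epi32(0, 1, i32::MIN, 7);
        let r = _mm_lzcnt_epi32(a);
        let e = _mm_set_epi32(32, 31, 0, 29);
        assert_eq_m128i(r, e);
    }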

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_lzcnt_epi64() {
        let a = _mm512_set1_epi64(1);
        let r = _mm512_lzcnt_epi64(a);
        let e = _mm512_set1_epi64(63);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_mask_lzcnt_epi64() {
        let a = _mm512_set1_epi64(1);
        let r = _mm512_mask_lzcnt_epi64(a, 0, a);
        assert_eq_m512i(r, a);
        let r = _mm512_mask_lzcnt_epi64(a, 0b11111111, a);
        let e = _mm512_set1_epi64(63);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd")]
    unsafe fn test_mm512_maskz_lzcnt_epi64() {
        let a = _mm512_set1_epi64(2);
        let r = _mm512_maskz_lzcnt_epi64(0, a);
        assert_eq_m512i(r, _mm512_setzero_si512());
        let r = _mm512_maskz_lzcnt_epi64(0b11111111, a);
        let e = _mm512_set1_epi64(62);
        assert_eq_m512i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_lzcnt_epi64() {
        let a = _mm256_set1_epi64x(1);
        let r = _mm256_lzcnt_epi64(a);
        let e = _mm256_set1_epi64x(63);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_mask_lzcnt_epi64() {
        let a = _mm256_set1_epi64x(1);
        let r = _mm256_mask_lzcnt_epi64(a, 0, a);
        assert_eq_m256i(r, a);
        let r = _mm256_mask_lzcnt_epi64(a, 0b00001111, a);
        let e = _mm256_set1_epi64x(63);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm256_maskz_lzcnt_epi64() {
        let a = _mm256_set1_epi64x(1);
        let r = _mm256_maskz_lzcnt_epi64(0, a);
        assert_eq_m256i(r, _mm256_setzero_si256());
        let r = _mm256_maskz_lzcnt_epi64(0b00001111, a);
        let e = _mm256_set1_epi64x(63);
        assert_eq_m256i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_lzcnt_epi64() {
        let a = _mm_set1_epi64x(1);
        let r = _mm_lzcnt_epi64(a);
        let e = _mm_set1_epi64x(63);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_mask_lzcnt_epi64() {
        let a = _mm_set1_epi64x(1);
        let r = _mm_mask_lzcnt_epi64(a, 0, a);
        assert_eq_m128i(r, a);
        let r = _mm_mask_lzcnt_epi64(a, 0b00001111, a);
        let e = _mm_set1_epi64x(63);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_maskz_lzcnt_epi64() {
        let a = _mm_set1_epi64x(1);
        let r = _mm_maskz_lzcnt_epi64(0, a);
        assert_eq_m128i(r, _mm_setzero_si128());
        let r = _mm_maskz_lzcnt_epi64(0b00001111, a);
        let e = _mm_set1_epi64x(63);
        assert_eq_m128i(r, e);
    }
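
    // Edge-case sketch for the 64-bit variant (an added check, not from
    // Intel's test vectors): lzcnt of 0 is 64, and the sign bit gives 0.
    #[simd_test(enable = "avx512cd,avx512vl")]
    unsafe fn test_mm_lzcnt_epi64_edge_cases() {
        let a = _mm_set_epi64x(0, i64::MIN);
        let r = _mm_lzcnt_epi64(a);
        let e = _mm_set_epi64x(64, 0);
        assert_eq_m128i(r, e);
    }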
}