use crate::core_arch::{simd::*, x86::*};

#[allow(improper_ctypes)]
extern "C" {
    #[link_name = "llvm.x86.sha1msg1"]
    fn sha1msg1(a: i32x4, b: i32x4) -> i32x4;
    #[link_name = "llvm.x86.sha1msg2"]
    fn sha1msg2(a: i32x4, b: i32x4) -> i32x4;
    #[link_name = "llvm.x86.sha1nexte"]
    fn sha1nexte(a: i32x4, b: i32x4) -> i32x4;
    #[link_name = "llvm.x86.sha1rnds4"]
    fn sha1rnds4(a: i32x4, b: i32x4, c: i8) -> i32x4;
    #[link_name = "llvm.x86.sha256msg1"]
    fn sha256msg1(a: i32x4, b: i32x4) -> i32x4;
    #[link_name = "llvm.x86.sha256msg2"]
    fn sha256msg2(a: i32x4, b: i32x4) -> i32x4;
    #[link_name = "llvm.x86.sha256rnds2"]
    fn sha256rnds2(a: i32x4, b: i32x4, k: i32x4) -> i32x4;
}

#[cfg(test)]
use stdarch_test::assert_instr;
/// Performs an intermediate calculation for the next four SHA1 message values
/// (unsigned 32-bit integers) using previous message values from `a` and `b`,
/// and returns the result.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1msg1_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1msg1))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha1msg1_epu32(a: __m128i, b: __m128i) -> __m128i {
    transmute(sha1msg1(a.as_i32x4(), b.as_i32x4()))
}

/// Performs the final calculation for the next four SHA1 message values
/// (unsigned 32-bit integers) using the intermediate result in `a` and the
/// previous message values in `b`, and returns the result.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1msg2_epu32)
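///
/// A non-normative usage sketch: assuming `w0`..`w3` are illustrative locals
/// holding the previous sixteen schedule words (four per vector, oldest
/// first), the next four words are conventionally produced by chaining
/// `_mm_sha1msg1_epu32`, an XOR, and this intrinsic:
///
/// ```ignore
/// let t = _mm_sha1msg1_epu32(w0, w1); // mix W[t-16..t-13] with W[t-12..t-9]
/// let t = _mm_xor_si128(t, w2);       // xor in W[t-8..t-5]
/// let w4 = _mm_sha1msg2_epu32(t, w3); // finish with W[t-4..t-1]; `w4` holds the next four words
/// ```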
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1msg2))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha1msg2_epu32(a: __m128i, b: __m128i) -> __m128i {
    transmute(sha1msg2(a.as_i32x4(), b.as_i32x4()))
}

/// Calculates SHA1 state variable E after four rounds of operation from the
/// current SHA1 state variable `a`, adds that value to the scheduled values
/// (unsigned 32-bit integers) in `b`, and returns the result.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1nexte_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1nexte))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha1nexte_epu32(a: __m128i, b: __m128i) -> __m128i {
    transmute(sha1nexte(a.as_i32x4(), b.as_i32x4()))
}

/// Performs four rounds of SHA1 operation using an initial SHA1 state (A,B,C,D)
/// from `a` and a pre-computed sum of the next four round message values
/// (unsigned 32-bit integers) and state variable E from `b`, and returns the
/// updated SHA1 state (A,B,C,D). `FUNC` selects the logic function and round
/// constant to use: `0` for rounds 0-19, `1` for rounds 20-39, `2` for rounds
/// 40-59, and `3` for rounds 60-79.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha1rnds4_epu32)
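///
/// A non-normative usage sketch: assuming `abcd` is an illustrative local
/// holding the (A,B,C,D) state, `e0` holds E in its most significant 32-bit
/// lane with zeros elsewhere, and `wk0`/`wk1` hold the next two groups of four
/// schedule words, eight rounds are typically driven like this:
///
/// ```ignore
/// let e0 = _mm_add_epi32(e0, wk0);               // add E to the first round's word
/// let e1 = abcd;                                 // save ABCD; it yields the next E
/// let abcd = _mm_sha1rnds4_epu32::<0>(abcd, e0); // rounds 0-3 (FUNC = 0)
///
/// let e1 = _mm_sha1nexte_epu32(e1, wk1);         // derive the new E and add the next words
/// let e0 = abcd;
/// let abcd = _mm_sha1rnds4_epu32::<0>(abcd, e1); // rounds 4-7
/// ```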
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha1rnds4, FUNC = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha1rnds4_epu32<const FUNC: i32>(a: __m128i, b: __m128i) -> __m128i {
    static_assert_uimm_bits!(FUNC, 2);
    transmute(sha1rnds4(a.as_i32x4(), b.as_i32x4(), FUNC as i8))
}

/// Performs an intermediate calculation for the next four SHA256 message values
/// (unsigned 32-bit integers) using previous message values from `a` and `b`,
/// and returns the result.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha256msg1_epu32)
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha256msg1))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha256msg1_epu32(a: __m128i, b: __m128i) -> __m128i {
    transmute(sha256msg1(a.as_i32x4(), b.as_i32x4()))
}

/// Performs the final calculation for the next four SHA256 message values
/// (unsigned 32-bit integers) using previous message values from `a` and `b`,
/// and returns the result.
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha256msg2_epu32)
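///
/// A non-normative usage sketch: assuming `msg0`..`msg3` are illustrative
/// locals holding the previous sixteen schedule words (four per vector, oldest
/// first), the next four words are conventionally produced like this:
///
/// ```ignore
/// let t = _mm_sha256msg1_epu32(msg0, msg1);                   // W[t-16] + sigma0(W[t-15]) terms
/// let t = _mm_add_epi32(t, _mm_alignr_epi8::<4>(msg3, msg2)); // add the W[t-7] terms
/// let next = _mm_sha256msg2_epu32(t, msg3);                   // add sigma1(W[t-2]) terms
/// ```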
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha256msg2))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha256msg2_epu32(a: __m128i, b: __m128i) -> __m128i {
    transmute(sha256msg2(a.as_i32x4(), b.as_i32x4()))
}

/// Performs 2 rounds of SHA256 operation using an initial SHA256 state
/// (C,D,G,H) from `a`, an initial SHA256 state (A,B,E,F) from `b`, and a
/// pre-computed sum of the next 2 round message values (unsigned 32-bit
/// integers) and the corresponding round constants from the low 64 bits of
/// `k`, and returns the updated SHA256 state (A,B,E,F).
///
/// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sha256rnds2_epu32)
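///
/// A non-normative usage sketch: assuming `abef` and `cdgh` are illustrative
/// locals holding the two halves of the SHA256 state, `msg` holds four
/// schedule words and `k` the matching round constants, four rounds are
/// typically driven by a pair of calls, with the high half of the summed
/// words shuffled down in between:
///
/// ```ignore
/// let wk = _mm_add_epi32(msg, k);                           // W + K for the next four rounds
/// let new_abef = _mm_sha256rnds2_epu32(cdgh, abef, wk);     // rounds i, i+1 (low half of `wk`)
/// let new_cdgh = abef;                                      // old (A,B,E,F) becomes (C,D,G,H)
/// let wk = _mm_shuffle_epi32::<0x0E>(wk);                   // move the high half of `wk` down
/// let abef = _mm_sha256rnds2_epu32(new_cdgh, new_abef, wk); // rounds i+2, i+3
/// let cdgh = new_abef;
/// ```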
#[inline]
#[target_feature(enable = "sha")]
#[cfg_attr(test, assert_instr(sha256rnds2))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_sha256rnds2_epu32(a: __m128i, b: __m128i, k: __m128i) -> __m128i {
    transmute(sha256rnds2(a.as_i32x4(), b.as_i32x4(), k.as_i32x4()))
}

#[cfg(test)]
mod tests {
    use std::{
        f32,
        f64::{self, NAN},
        i32,
        mem::{self, transmute},
    };

    use crate::{
        core_arch::{simd::*, x86::*},
        hint::black_box,
    };
    use stdarch_test::simd_test;

    #[simd_test(enable = "sha")]
    #[allow(overflowing_literals)]
    unsafe fn test_mm_sha1msg1_epu32() {
        let a = _mm_set_epi64x(0xe9b5dba5b5c0fbcf, 0x71374491428a2f98);
        let b = _mm_set_epi64x(0xab1c5ed5923f82a4, 0x59f111f13956c25b);
        let expected = _mm_set_epi64x(0x98829f34f74ad457, 0xda2b1a44d0b5ad3c);
        let r = _mm_sha1msg1_epu32(a, b);
        assert_eq_m128i(r, expected);
    }

    #[simd_test(enable = "sha")]
    #[allow(overflowing_literals)]
    unsafe fn test_mm_sha1msg2_epu32() {
        let a = _mm_set_epi64x(0xe9b5dba5b5c0fbcf, 0x71374491428a2f98);
        let b = _mm_set_epi64x(0xab1c5ed5923f82a4, 0x59f111f13956c25b);
        let expected = _mm_set_epi64x(0xf714b202d863d47d, 0x90c30d946b3d3b35);
        let r = _mm_sha1msg2_epu32(a, b);
        assert_eq_m128i(r, expected);
    }

    #[simd_test(enable = "sha")]
    #[allow(overflowing_literals)]
    unsafe fn test_mm_sha1nexte_epu32() {
        let a = _mm_set_epi64x(0xe9b5dba5b5c0fbcf, 0x71374491428a2f98);
        let b = _mm_set_epi64x(0xab1c5ed5923f82a4, 0x59f111f13956c25b);
        let expected = _mm_set_epi64x(0x2589d5be923f82a4, 0x59f111f13956c25b);
        let r = _mm_sha1nexte_epu32(a, b);
        assert_eq_m128i(r, expected);
    }

    #[simd_test(enable = "sha")]
    #[allow(overflowing_literals)]
    unsafe fn test_mm_sha1rnds4_epu32() {
        let a = _mm_set_epi64x(0xe9b5dba5b5c0fbcf, 0x71374491428a2f98);
        let b = _mm_set_epi64x(0xab1c5ed5923f82a4, 0x59f111f13956c25b);
        let expected = _mm_set_epi64x(0x32b13cd8322f5268, 0xc54420862bd9246f);
        let r = _mm_sha1rnds4_epu32::<0>(a, b);
        assert_eq_m128i(r, expected);

        let expected = _mm_set_epi64x(0x6d4c43e56a3c25d9, 0xa7e00fb775cbd3fe);
        let r = _mm_sha1rnds4_epu32::<1>(a, b);
        assert_eq_m128i(r, expected);

        let expected = _mm_set_epi64x(0xb304e383c01222f4, 0x66f6b3b1f89d8001);
        let r = _mm_sha1rnds4_epu32::<2>(a, b);
        assert_eq_m128i(r, expected);

        let expected = _mm_set_epi64x(0x8189b758bfabfa79, 0xdb08f6e78cae098b);
        let r = _mm_sha1rnds4_epu32::<3>(a, b);
        assert_eq_m128i(r, expected);
    }

    #[simd_test(enable = "sha")]
    #[allow(overflowing_literals)]
    unsafe fn test_mm_sha256msg1_epu32() {
        let a = _mm_set_epi64x(0xe9b5dba5b5c0fbcf, 0x71374491428a2f98);
        let b = _mm_set_epi64x(0xab1c5ed5923f82a4, 0x59f111f13956c25b);
        let expected = _mm_set_epi64x(0xeb84973fd5cda67d, 0x2857b88f406b09ee);
        let r = _mm_sha256msg1_epu32(a, b);
        assert_eq_m128i(r, expected);
    }

    #[simd_test(enable = "sha")]
    #[allow(overflowing_literals)]
    unsafe fn test_mm_sha256msg2_epu32() {
        let a = _mm_set_epi64x(0xe9b5dba5b5c0fbcf, 0x71374491428a2f98);
        let b = _mm_set_epi64x(0xab1c5ed5923f82a4, 0x59f111f13956c25b);
        let expected = _mm_set_epi64x(0xb58777ce887fd851, 0x15d1ec8b73ac8450);
        let r = _mm_sha256msg2_epu32(a, b);
        assert_eq_m128i(r, expected);
    }

    #[simd_test(enable = "sha")]
    #[allow(overflowing_literals)]
    unsafe fn test_mm_sha256rnds2_epu32() {
        let a = _mm_set_epi64x(0xe9b5dba5b5c0fbcf, 0x71374491428a2f98);
        let b = _mm_set_epi64x(0xab1c5ed5923f82a4, 0x59f111f13956c25b);
        let k = _mm_set_epi64x(0, 0x12835b01d807aa98);
        let expected = _mm_set_epi64x(0xd3063037effb15ea, 0x187ee3db0d6d1d19);
        let r = _mm_sha256rnds2_epu32(a, b, k);
        assert_eq_m128i(r, expected);
    }
}