//
// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Reference paper titled "Fast CRC Computation for Generic
// Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch		armv8-a+crypto

	init_crc	.req	w0
	buf		.req	x1
	len		.req	x2
	fold_consts_ptr	.req	x3

	fold_consts	.req	v10

	ad		.req	v14

	k00_16		.req	v15
	k32_48		.req	v16

	t3		.req	v17
	t4		.req	v18
	t5		.req	v19
	t6		.req	v20
	t7		.req	v21
	t8		.req	v22
	t9		.req	v23

	perm1		.req	v24
	perm2		.req	v25
	perm3		.req	v26
	perm4		.req	v27

	bd1		.req	v28
	bd2		.req	v29
	bd3		.req	v30
	bd4		.req	v31

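	// Two variants of the CRC update are generated from the macros below:
	// the 'p64' variant uses the 64x64-bit PMULL/PMULL2 instructions from
	// the Crypto Extensions directly, while the 'p8' variant is a fallback
	// built on the baseline AdvSIMD 8x8-bit polynomial multiply.  The
	// __pmull_init_* and __pmull_pre_* macros set up whatever per-call and
	// per-constant state the selected variant needs: nothing for p64;
	// masks, permutation vectors and rotated copies of the fold constant
	// for p8.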
	.macro		__pmull_init_p64
	.endm

	.macro		__pmull_pre_p64, bd
	.endm

	.macro		__pmull_init_p8
	// k00_16 := 0x0000000000000000_000000000000ffff
	// k32_48 := 0x00000000ffffffff_0000ffffffffffff
	movi		k32_48.2d, #0xffffffff
	mov		k32_48.h[2], k32_48.h[0]
	ushr		k00_16.2d, k32_48.2d, #32

	// prepare the permutation vectors
	mov_q		x5, 0x080f0e0d0c0b0a09
	movi		perm4.8b, #8
	dup		perm1.2d, x5
	eor		perm1.16b, perm1.16b, perm4.16b
	ushr		perm2.2d, perm1.2d, #8
	ushr		perm3.2d, perm1.2d, #16
	ushr		perm4.2d, perm1.2d, #24
	sli		perm2.2d, perm1.2d, #56
	sli		perm3.2d, perm1.2d, #48
	sli		perm4.2d, perm1.2d, #40
	.endm

	.macro		__pmull_pre_p8, bd
	tbl		bd1.16b, {\bd\().16b}, perm1.16b
	tbl		bd2.16b, {\bd\().16b}, perm2.16b
	tbl		bd3.16b, {\bd\().16b}, perm3.16b
	tbl		bd4.16b, {\bd\().16b}, perm4.16b
	.endm
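	// The p8 path emulates the 64x64-bit carry-less multiply using only
	// the baseline 8x8-bit pmull.  __pmull_pre_p8 precomputes byte-rotated
	// copies of each 64-bit half of the multiplicand (bd1-bd4), and
	// __pmull_p8_core below combines their products with products of
	// byte-rotated copies of the data (A1-A3) to recover the cross terms
	// that a single 8-bit pmull cannot produce.  The uzp/mask/zip sequence
	// essentially cancels the bytes that wrap around because the operands
	// are rotated rather than shifted, and the ext #15/#14/#13/#12 steps
	// realign each partial sum to its proper byte offset before it is
	// XORed into D = A*B by the __pmull_p8 macro.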

SYM_FUNC_START_LOCAL(__pmull_p8_core)
.L__pmull_p8_core:
	ext		t4.8b, ad.8b, ad.8b, #1			// A1
	ext		t5.8b, ad.8b, ad.8b, #2			// A2
	ext		t6.8b, ad.8b, ad.8b, #3			// A3

	pmull		t4.8h, t4.8b, fold_consts.8b		// F = A1*B
	pmull		t8.8h, ad.8b, bd1.8b			// E = A*B1
	pmull		t5.8h, t5.8b, fold_consts.8b		// H = A2*B
	pmull		t7.8h, ad.8b, bd2.8b			// G = A*B2
	pmull		t6.8h, t6.8b, fold_consts.8b		// J = A3*B
	pmull		t9.8h, ad.8b, bd3.8b			// I = A*B3
	pmull		t3.8h, ad.8b, bd4.8b			// K = A*B4
	b		0f

.L__pmull_p8_core2:
	tbl		t4.16b, {ad.16b}, perm1.16b		// A1
	tbl		t5.16b, {ad.16b}, perm2.16b		// A2
	tbl		t6.16b, {ad.16b}, perm3.16b		// A3

	pmull2		t4.8h, t4.16b, fold_consts.16b		// F = A1*B
	pmull2		t8.8h, ad.16b, bd1.16b			// E = A*B1
	pmull2		t5.8h, t5.16b, fold_consts.16b		// H = A2*B
	pmull2		t7.8h, ad.16b, bd2.16b			// G = A*B2
	pmull2		t6.8h, t6.16b, fold_consts.16b		// J = A3*B
	pmull2		t9.8h, ad.16b, bd3.16b			// I = A*B3
	pmull2		t3.8h, ad.16b, bd4.16b			// K = A*B4

0:	eor		t4.16b, t4.16b, t8.16b			// L = E + F
	eor		t5.16b, t5.16b, t7.16b			// M = G + H
	eor		t6.16b, t6.16b, t9.16b			// N = I + J

	uzp1		t8.2d, t4.2d, t5.2d
	uzp2		t4.2d, t4.2d, t5.2d
	uzp1		t7.2d, t6.2d, t3.2d
	uzp2		t6.2d, t6.2d, t3.2d

	// t4 = (L) (P0 + P1) << 8
	// t5 = (M) (P2 + P3) << 16
	eor		t8.16b, t8.16b, t4.16b
	and		t4.16b, t4.16b, k32_48.16b

	// t6 = (N) (P4 + P5) << 24
	// t7 = (K) (P6 + P7) << 32
	eor		t7.16b, t7.16b, t6.16b
	and		t6.16b, t6.16b, k00_16.16b

	eor		t8.16b, t8.16b, t4.16b
	eor		t7.16b, t7.16b, t6.16b

	zip2		t5.2d, t8.2d, t4.2d
	zip1		t4.2d, t8.2d, t4.2d
	zip2		t3.2d, t7.2d, t6.2d
	zip1		t6.2d, t7.2d, t6.2d

	ext		t4.16b, t4.16b, t4.16b, #15
	ext		t5.16b, t5.16b, t5.16b, #14
	ext		t6.16b, t6.16b, t6.16b, #13
	ext		t3.16b, t3.16b, t3.16b, #12

	eor		t4.16b, t4.16b, t5.16b
	eor		t6.16b, t6.16b, t3.16b
	ret
SYM_FUNC_END(__pmull_p8_core)

	.macro		__pmull_p8, rq, ad, bd, i
	.ifnc		\bd, fold_consts
	.err
	.endif
	mov		ad.16b, \ad\().16b
	.ifb		\i
	pmull		\rq\().8h, \ad\().8b, \bd\().8b		// D = A*B
	.else
	pmull2		\rq\().8h, \ad\().16b, \bd\().16b	// D = A*B
	.endif

	bl		.L__pmull_p8_core\i

	eor		\rq\().16b, \rq\().16b, t4.16b
	eor		\rq\().16b, \rq\().16b, t6.16b
	.endm

	// Fold reg1, reg2 into the next 32 data bytes, storing the result back
	// into reg1, reg2.
	.macro		fold_32_bytes, p, reg1, reg2
	ldp		q11, q12, [buf], #0x20

	__pmull_\p	v8, \reg1, fold_consts, 2
	__pmull_\p	\reg1, \reg1, fold_consts

CPU_LE(	rev64		v11.16b, v11.16b		)
CPU_LE(	rev64		v12.16b, v12.16b		)

	__pmull_\p	v9, \reg2, fold_consts, 2
	__pmull_\p	\reg2, \reg2, fold_consts

CPU_LE(	ext		v11.16b, v11.16b, v11.16b, #8	)
CPU_LE(	ext		v12.16b, v12.16b, v12.16b, #8	)

	eor		\reg1\().16b, \reg1\().16b, v8.16b
	eor		\reg2\().16b, \reg2\().16b, v9.16b
	eor		\reg1\().16b, \reg1\().16b, v11.16b
	eor		\reg2\().16b, \reg2\().16b, v12.16b
	.endm
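	// The fold steps above rely on the identity
	//
	//	(H*x^64 + L) * x^(8*N) == H * (x^(8*N+64) mod G(x)) +
	//				  L * (x^(8*N) mod G(x))	(mod G(x))
	//
	// where H and L are the high and low 64-bit halves of a vector that
	// logically sits N bytes ahead of the data it is folded into, and
	// fold_consts holds the two precomputed remainders for the current
	// fold distance N (128 bytes in the main loop below).  Since those
	// remainders are at most 16 bits wide, both products fit in 128 bits,
	// so two pmulls plus XORs advance the vector by N bytes of message.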

	// Fold src_reg into dst_reg, optionally loading the next fold constants
	.macro		fold_16_bytes, p, src_reg, dst_reg, load_next_consts
	__pmull_\p	v8, \src_reg, fold_consts
	__pmull_\p	\src_reg, \src_reg, fold_consts, 2
	.ifnb		\load_next_consts
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts
	.endif
	eor		\dst_reg\().16b, \dst_reg\().16b, v8.16b
	eor		\dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
	.endm
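	// fold_16_bytes applies the same identity with the distance currently
	// in fold_consts; passing 'load_next_consts' also fetches the constants
	// for the next, shorter fold distance so they are ready for the
	// following round of folds.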

	.macro		__pmull_p64, rd, rn, rm, n
	.ifb		\n
	pmull		\rd\().1q, \rn\().1d, \rm\().1d
	.else
	pmull2		\rd\().1q, \rn\().2d, \rm\().2d
	.endif
	.endm

	.macro		crc_t10dif_pmull, p
	__pmull_init_\p

	// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
	cmp		len, #256
	b.lt		.Lless_than_256_bytes_\@

	adr_l		fold_consts_ptr, .Lfold_across_128_bytes_consts

	// Load the first 128 data bytes.  Byte swapping is necessary to make
	// the bit order match the polynomial coefficient order.
	ldp		q0, q1, [buf]
	ldp		q2, q3, [buf, #0x20]
	ldp		q4, q5, [buf, #0x40]
	ldp		q6, q7, [buf, #0x60]
	add		buf, buf, #0x80
CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	rev64		v1.16b, v1.16b			)
CPU_LE(	rev64		v2.16b, v2.16b			)
CPU_LE(	rev64		v3.16b, v3.16b			)
CPU_LE(	rev64		v4.16b, v4.16b			)
CPU_LE(	rev64		v5.16b, v5.16b			)
CPU_LE(	rev64		v6.16b, v6.16b			)
CPU_LE(	rev64		v7.16b, v7.16b			)
CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
CPU_LE(	ext		v1.16b, v1.16b, v1.16b, #8	)
CPU_LE(	ext		v2.16b, v2.16b, v2.16b, #8	)
CPU_LE(	ext		v3.16b, v3.16b, v3.16b, #8	)
CPU_LE(	ext		v4.16b, v4.16b, v4.16b, #8	)
CPU_LE(	ext		v5.16b, v5.16b, v5.16b, #8	)
CPU_LE(	ext		v6.16b, v6.16b, v6.16b, #8	)
CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)

	// XOR the first 16 data *bits* with the initial CRC value.
	movi		v8.16b, #0
	mov		v8.h[7], init_crc
	eor		v0.16b, v0.16b, v8.16b
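	// Seeding the CRC this way works because, for this non-reflected CRC,
	// processing an n-bit message M through a register initialised to
	// 'init_crc' yields (init_crc * x^n + M(x) * x^16) mod G(x), which is
	// exactly the zero-seeded CRC of M with its first 16 bits XORed with
	// 'init_crc'.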

	// Load the constants for folding across 128 bytes.
	ld1		{fold_consts.2d}, [fold_consts_ptr]
	__pmull_pre_\p	fold_consts

	// Subtract 128 for the 128 data bytes just consumed.  Subtract another
	// 128 to simplify the termination condition of the following loop.
	sub		len, len, #256

	// While >= 128 data bytes remain (not counting v0-v7), fold the 128
	// bytes v0-v7 into them, storing the result back into v0-v7.
.Lfold_128_bytes_loop_\@:
	fold_32_bytes	\p, v0, v1
	fold_32_bytes	\p, v2, v3
	fold_32_bytes	\p, v4, v5
	fold_32_bytes	\p, v6, v7

	subs		len, len, #128
	b.ge		.Lfold_128_bytes_loop_\@

	// Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.

	// Fold across 64 bytes.
	add		fold_consts_ptr, fold_consts_ptr, #16
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts
	fold_16_bytes	\p, v0, v4
	fold_16_bytes	\p, v1, v5
	fold_16_bytes	\p, v2, v6
	fold_16_bytes	\p, v3, v7, 1
	// Fold across 32 bytes.
	fold_16_bytes	\p, v4, v6
	fold_16_bytes	\p, v5, v7, 1
	// Fold across 16 bytes.
	fold_16_bytes	\p, v6, v7

	// Add 128 to get the correct number of data bytes remaining in 0...127
	// (not counting v7), following the previous extra subtraction by 128.
	// Then subtract 16 to simplify the termination condition of the
	// following loop.
	adds		len, len, #(128-16)

	// While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7
	// into them, storing the result back into v7.
	b.lt		.Lfold_16_bytes_loop_done_\@
.Lfold_16_bytes_loop_\@:
	__pmull_\p	v8, v7, fold_consts
	__pmull_\p	v7, v7, fold_consts, 2
	eor		v7.16b, v7.16b, v8.16b
	ldr		q0, [buf], #16
CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
	eor		v7.16b, v7.16b, v0.16b
	subs		len, len, #16
	b.ge		.Lfold_16_bytes_loop_\@

.Lfold_16_bytes_loop_done_\@:
	// Add 16 to get the correct number of data bytes remaining in 0...15
	// (not counting v7), following the previous extra subtraction by 16.
	adds		len, len, #16
	b.eq		.Lreduce_final_16_bytes_\@

.Lhandle_partial_segment_\@:
	// Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
	// 16 bytes are in v7 and the rest are the remaining data in 'buf'.  To
	// do this without needing a fold constant for each possible 'len',
	// redivide the bytes into a first chunk of 'len' bytes and a second
	// chunk of 16 bytes, then fold the first chunk into the second.
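	// Note that fold_consts still holds the fold-across-16-bytes constants
	// here, which is exactly what is needed: the first chunk sits 16 bytes
	// ahead of the second chunk in the message, regardless of 'len'.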

	// v0 = last 16 original data bytes
	add		buf, buf, len
	ldr		q0, [buf, #-16]
CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)

	// v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
	adr_l		x4, .Lbyteshift_table + 16
	sub		x4, x4, len
	ld1		{v2.16b}, [x4]
	tbl		v1.16b, {v7.16b}, v2.16b

	// v3 = first chunk: v7 right-shifted by '16-len' bytes.
	movi		v3.16b, #0x80
	eor		v2.16b, v2.16b, v3.16b
	tbl		v3.16b, {v7.16b}, v2.16b

	// Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
	sshr		v2.16b, v2.16b, #7

	// v2 = second chunk: 'len' bytes from v0 (low-order bytes),
	// then '16-len' bytes from v1 (high-order bytes).
	bsl		v2.16b, v1.16b, v0.16b

	// Fold the first chunk into the second chunk, storing the result in v7.
	__pmull_\p	v0, v3, fold_consts
	__pmull_\p	v7, v3, fold_consts, 2
	eor		v7.16b, v7.16b, v0.16b
	eor		v7.16b, v7.16b, v2.16b

.Lreduce_final_16_bytes_\@:
	// Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.

	movi		v2.16b, #0		// init zero register

	// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts

	// Fold the high 64 bits into the low 64 bits, while also multiplying by
	// x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
	// whose low 48 bits are 0.
	ext		v0.16b, v2.16b, v7.16b, #8
	__pmull_\p	v7, v7, fold_consts, 2	// high bits * x^48 * (x^80 mod G(x))
	eor		v0.16b, v0.16b, v7.16b	// + low bits * x^64

	// Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
	// value congruent to x^64 * M(x) and whose low 48 bits are 0.
	ext		v1.16b, v0.16b, v2.16b, #12	// extract high 32 bits
	mov		v0.s[3], v2.s[0]	// zero high 32 bits
	__pmull_\p	v1, v1, fold_consts	// high 32 bits * x^48 * (x^48 mod G(x))
	eor		v0.16b, v0.16b, v1.16b	// + low bits

	// Load G(x) and floor(x^48 / G(x)).
	ld1		{fold_consts.2d}, [fold_consts_ptr]
	__pmull_pre_\p	fold_consts

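	// In outline: the low 48 bits of v0 are zero, so v0 = x^48 * S for a
	// 48-bit value S congruent to x^16 * M(x) modulo G(x); the final CRC
	// is S mod G(x).  Barrett reduction obtains the quotient
	// q = floor(S / G(x)) as floor(floor(S / x^16) * floor(x^48 / G(x)) / x^32),
	// and S mod G(x) is then the low 16 bits of S XOR q*G(x), which is all
	// that the final umov extracts.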
	// Use Barrett reduction to compute the final CRC value.
	__pmull_\p	v1, v0, fold_consts, 2	// high 32 bits * floor(x^48 / G(x))
	ushr		v1.2d, v1.2d, #32	// /= x^32
	__pmull_\p	v1, v1, fold_consts	// *= G(x)
	ushr		v0.2d, v0.2d, #48
	eor		v0.16b, v0.16b, v1.16b	// + low 16 nonzero bits
	// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.

	umov		w0, v0.h[0]
	.ifc		\p, p8
	frame_pop
	.endif
	ret

.Lless_than_256_bytes_\@:
	// Checksumming a buffer of length 16...255 bytes

	adr_l		fold_consts_ptr, .Lfold_across_16_bytes_consts

	// Load the first 16 data bytes.
	ldr		q7, [buf], #0x10
CPU_LE(	rev64		v7.16b, v7.16b			)
CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)

	// XOR the first 16 data *bits* with the initial CRC value.
	movi		v0.16b, #0
	mov		v0.h[7], init_crc
	eor		v7.16b, v7.16b, v0.16b

	// Load the fold-across-16-bytes constants.
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16
	__pmull_pre_\p	fold_consts

	cmp		len, #16
	b.eq		.Lreduce_final_16_bytes_\@	// len == 16
	subs		len, len, #32
	b.ge		.Lfold_16_bytes_loop_\@		// 32 <= len <= 255
	add		len, len, #16
	b		.Lhandle_partial_segment_\@	// 17 <= len <= 31
	.endm

//
// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
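// The p8 variant needs a stack frame because __pmull_p8 reaches
// __pmull_p8_core via 'bl', clobbering the link register; the frame_push
// here is paired with the frame_pop that crc_t10dif_pmull performs before
// its final 'ret' in the p8 case.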
SYM_FUNC_START(crc_t10dif_pmull_p8)
	frame_push	1
	crc_t10dif_pmull p8
SYM_FUNC_END(crc_t10dif_pmull_p8)

	.align		5
//
// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p64)
	crc_t10dif_pmull p64
SYM_FUNC_END(crc_t10dif_pmull_p64)

	.section	".rodata", "a"
	.align		4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
.Lfold_across_128_bytes_consts:
	.quad		0x0000000000006123	// x^(8*128)	mod G(x)
	.quad		0x0000000000002295	// x^(8*128+64)	mod G(x)
// .Lfold_across_64_bytes_consts:
	.quad		0x0000000000001069	// x^(4*128)	mod G(x)
	.quad		0x000000000000dd31	// x^(4*128+64)	mod G(x)
// .Lfold_across_32_bytes_consts:
	.quad		0x000000000000857d	// x^(2*128)	mod G(x)
	.quad		0x0000000000007acc	// x^(2*128+64)	mod G(x)
.Lfold_across_16_bytes_consts:
	.quad		0x000000000000a010	// x^(1*128)	mod G(x)
	.quad		0x0000000000001faa	// x^(1*128+64)	mod G(x)
// .Lfinal_fold_consts:
	.quad		0x1368000000000000	// x^48 * (x^48 mod G(x))
	.quad		0x2d56000000000000	// x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
	.quad		0x0000000000018bb7	// G(x)
	.quad		0x00000001f65a57f8	// floor(x^48 / G(x))
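// Each pair of fold constants above is (x^(8*N) mod G(x), x^(8*N+64) mod
// G(x)) for fold distance N bytes: multiplying the low and high halves of a
// vector by them is equivalent to advancing that vector by N bytes of
// message (see the identity documented with fold_32_bytes).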

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
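// For example, with len = 5 the vector at &byteshift_table[11] is
// {0x8b, ..., 0x8f, 0x0, 0x1, ..., 0xa}: as tbl indices it moves bytes 0-10
// of v7 up by 5 lanes (the 0x8x entries select zero), and after XOR with
// 0x80 it instead selects bytes 11-15 of v7 into the bottom 5 lanes.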
.Lbyteshift_table:
	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0
515 | |