1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions |
4 | * |
5 | * Copyright (C) 2015 Linaro Ltd. |
6 | * Author: Ard Biesheuvel <ard.biesheuvel@linaro.org> |
7 | */ |
8 | |
9 | #include <linux/linkage.h> |
10 | #include <asm/assembler.h> |
11 | |
	.text
	.arch		armv8-a
	.fpu		crypto-neon-fp-armv8

	@ k0-k3: the four SHA-1 round constants, each splatted across a
	@ q register (loaded from .Lsha1_rcon below)
	k0		.req	q0
	k1		.req	q1
	k2		.req	q2
	k3		.req	q3

	@ ta/tb alias the same two registers with roles swapped by round
	@ parity: the tb value produced in one round (schedule word + round
	@ constant) is consumed as ta by the following round.
	ta0		.req	q4
	ta1		.req	q5
	tb0		.req	q5
	tb1		.req	q4

	@ dga holds state words a-d; dgb holds state word e in its low lane
	@ (dgbs = s28 is the scalar view of that lane, since q7 == s28-s31)
	dga		.req	q6
	dgb		.req	q7
	dgbs		.req	s28

	@ dg0 is the working a-d digest; dg1a/dg1b alias q13/q14 with roles
	@ swapped by round parity, so the rotated 'a' produced by sha1h in
	@ one round is consumed as 'e' by the next round's sha1c/p/m.
	dg0		.req	q12
	dg1a0		.req	q13
	dg1a1		.req	q14
	dg1b0		.req	q14
	dg1b1		.req	q13
35 | |
	/*
	 * Perform one 4-round SHA-1 step without a schedule update.
	 *
	 * op:  round function - c (choice), p (parity) or m (majority)
	 * ev:  round parity (0/1); selects the ta/tb and dg1a/dg1b aliases
	 * s0:  optional schedule register number; if present, stage
	 *      tb\ev = q\s0 + \rc as the input for the *next* step
	 * rc:  round constant register (k0-k3) used for that staging
	 * dg1: optional explicit 'e' input; only the very first step passes
	 *      this (dgb), later steps take the value sha1h left in dg1a\ev
	 */
	.macro		add_only, op, ev, rc, s0, dg1
	.ifnb		\s0
	vadd.u32	tb\ev, q\s0, \rc	@ stage input for next step
	.endif
	sha1h.32	dg1b\ev, dg0		@ save rotated 'a' as next 'e'
	.ifb		\dg1
	sha1\op\().32	dg0, dg1a\ev, ta\ev
	.else
	sha1\op\().32	dg0, \dg1, ta\ev	@ first step: 'e' from caller
	.endif
	.endm
47 | |
	/*
	 * Perform one 4-round SHA-1 step including a message schedule
	 * update: extend the schedule held in q\s0..q\s3 with
	 * sha1su0/sha1su1 around the round step itself (add_only).
	 */
	.macro		add_update, op, ev, rc, s0, s1, s2, s3, dg1
	sha1su0.32	q\s0, q\s1, q\s2
	add_only	\op, \ev, \rc, \s1, \dg1
	sha1su1.32	q\s0, q\s3
	.endm
53 | |
	/* The four SHA-1 round constants (FIPS 180-4), each replicated
	 * across all four 32-bit lanes of a q register */
	.align		6
.Lsha1_rcon:
	.word		0x5a827999, 0x5a827999, 0x5a827999, 0x5a827999
	.word		0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1, 0x6ed9eba1
	.word		0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc, 0x8f1bbcdc
	.word		0xca62c1d6, 0xca62c1d6, 0xca62c1d6, 0xca62c1d6
60 | |
61 | /* |
62 | * void sha1_ce_transform(struct sha1_state *sst, u8 const *src, |
63 | * int blocks); |
64 | */ |
65 | ENTRY(sha1_ce_transform) |
66 | /* load round constants */ |
67 | adr ip, .Lsha1_rcon |
68 | vld1.32 {k0-k1}, [ip, :128]! |
69 | vld1.32 {k2-k3}, [ip, :128] |
70 | |
71 | /* load state */ |
72 | vld1.32 {dga}, [r0] |
73 | vldr dgbs, [r0, #16] |
74 | |
75 | /* load input */ |
76 | 0: vld1.32 {q8-q9}, [r1]! |
77 | vld1.32 {q10-q11}, [r1]! |
78 | subs r2, r2, #1 |
79 | |
80 | #ifndef CONFIG_CPU_BIG_ENDIAN |
81 | vrev32.8 q8, q8 |
82 | vrev32.8 q9, q9 |
83 | vrev32.8 q10, q10 |
84 | vrev32.8 q11, q11 |
85 | #endif |
86 | |
87 | vadd.u32 ta0, q8, k0 |
88 | vmov dg0, dga |
89 | |
90 | add_update c, 0, k0, 8, 9, 10, 11, dgb |
91 | add_update c, 1, k0, 9, 10, 11, 8 |
92 | add_update c, 0, k0, 10, 11, 8, 9 |
93 | add_update c, 1, k0, 11, 8, 9, 10 |
94 | add_update c, 0, k1, 8, 9, 10, 11 |
95 | |
96 | add_update p, 1, k1, 9, 10, 11, 8 |
97 | add_update p, 0, k1, 10, 11, 8, 9 |
98 | add_update p, 1, k1, 11, 8, 9, 10 |
99 | add_update p, 0, k1, 8, 9, 10, 11 |
100 | add_update p, 1, k2, 9, 10, 11, 8 |
101 | |
102 | add_update m, 0, k2, 10, 11, 8, 9 |
103 | add_update m, 1, k2, 11, 8, 9, 10 |
104 | add_update m, 0, k2, 8, 9, 10, 11 |
105 | add_update m, 1, k2, 9, 10, 11, 8 |
106 | add_update m, 0, k3, 10, 11, 8, 9 |
107 | |
108 | add_update p, 1, k3, 11, 8, 9, 10 |
109 | add_only p, 0, k3, 9 |
110 | add_only p, 1, k3, 10 |
111 | add_only p, 0, k3, 11 |
112 | add_only p, 1 |
113 | |
114 | /* update state */ |
115 | vadd.u32 dga, dga, dg0 |
116 | vadd.u32 dgb, dgb, dg1a0 |
117 | bne 0b |
118 | |
119 | /* store new state */ |
120 | vst1.32 {dga}, [r0] |
121 | vstr dgbs, [r0, #16] |
122 | bx lr |
123 | ENDPROC(sha1_ce_transform) |
124 | |