// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>

#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* ================= Device Structure ================== */

struct bcm_device_private iproc_priv;

/* ==================== Parameters ===================== */

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * The value of these module parameters is used to set the priority for each
 * algo type when this driver registers algos with the kernel crypto API.
 * To use a priority other than the default, set the priority in the insmod or
 * modprobe. Changing the module priority after init time has no effect.
 *
 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
 * algos, but more preferred than generic software algos.
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
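
/*
 * For example, assuming the driver is built as a module named bcm_crypto_spu
 * (module name shown for illustration only), the defaults could be raised at
 * load time with:
 *
 *   modprobe bcm_crypto_spu cipher_pri=400 hash_pri=400 aead_pri=400
 */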

/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
 * 0x60 - ring 0
 * 0x68 - ring 1
 * 0x70 - ring 2
 * 0x78 - ring 3
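 * i.e. the first byte is 0x60 | (ring << 3)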
 */
static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
 */
#define BCM_HDR_LEN iproc_priv.bcm_hdr_len

/* min and max time to sleep before retrying when mbox queue is full. usec */
#define MBOX_SLEEP_MIN 800
#define MBOX_SLEEP_MAX 1000
/**
 * select_channel() - Select a SPU channel to handle a crypto request. Selects
 * channel in round robin order.
 *
 * Return: channel index
 */
static u8 select_channel(void)
{
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}

/**
 * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an skcipher request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg: mailbox message containing the receive sg
 * @rctx: crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @chunksize: Number of bytes of response data expected
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_skcipher_rx_sg_create(struct brcm_message *mssg,
			  struct iproc_reqctx_s *rctx,
			  u8 rx_frag_num,
			  unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	/* Copy in each dst sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

/**
 * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
 * send a SPU request message for an skcipher request. Includes SPU message
 * headers and the request data.
 * @mssg: mailbox message containing the transmit sg
 * @rctx: crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @chunksize: Number of bytes of request data
 * @pad_len: Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_skcipher_tx_sg_create(struct brcm_message *mssg,
			  struct iproc_reqctx_s *rctx,
			  u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of request data */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	/* Copy in each src sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}

static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	/* Check error returned by mailbox controller */
	err = mssg->error;
	if (unlikely(err < 0))
		dev_err(dev, "message error %d", err);

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}

/**
 * handle_skcipher_req() - Submit as much of a block cipher request as fits in
 * a single SPU request message, starting at the current position in the
 * request data.
 * @rctx: Crypto request context
 *
 * This may be called on the crypto API thread, or, when a request is so large
 * it must be broken into multiple SPU messages, on the thread used to invoke
 * the response callback. When requests are broken into multiple SPU
 * messages, we assume subsequent messages depend on previous results, and
 * thus always wait for previous results before submitting the next message.
 * Because requests are submitted in lock step like this, there is no need
 * to synchronize access to request data structures.
 *
 * Return: -EINPROGRESS: request has been accepted and result will be returned
 *			 asynchronously
 *         Any other value indicates an error
 */
static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct skcipher_request *req =
	    container_of(areq, struct skcipher_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct spu_cipher_parms cipher_parms;
	int err;
	unsigned int chunksize;	/* Num bytes of request to submit */
	int remaining;		/* Bytes of request still to process */
	int chunk_start;	/* Beginning of data for current SPU msg */

	/* IV or ctr value to use in this SPU msg */
	u8 local_iv_ctr[MAX_IV_SIZE];
	u32 stat_pad_len;	/* num bytes to align status field */
	u32 pad_len;		/* total length of all padding */
	struct brcm_message *mssg;	/* mailbox message */

	/* number of entries in src and dst sg in mailbox message. */
	u8 rx_frag_num = 2;	/* response header and STATUS */
	u8 tx_frag_num = 1;	/* request header */

	flow_log("%s\n", __func__);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.iv_buf = local_iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	remaining = rctx->total_todo - chunk_start;

	/* determine the chunk we are breaking off and update the indexes */
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (remaining > ctx->max_payload))
		chunksize = ctx->max_payload;
	else
		chunksize = remaining;

	rctx->src_sent += chunksize;
	rctx->total_sent = rctx->src_sent;

	/* Count number of sg entries to be included in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
	    rctx->is_encrypt && chunk_start)
		/*
		 * Encrypting a non-first chunk. Copy last block of
		 * previous result to IV for this chunk.
		 */
		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
				    rctx->iv_ctr_len,
				    chunk_start - rctx->iv_ctr_len);

	if (rctx->iv_ctr_len) {
		/* get our local copy of the iv */
		memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
		       rctx->iv_ctr_len);

		/* generate the next IV if possible */
		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
		    !rctx->is_encrypt) {
			/*
			 * CBC Decrypt: next IV is the last ciphertext block in
			 * this chunk
			 */
			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
					    rctx->iv_ctr_len,
					    rctx->src_sent - rctx->iv_ctr_len);
		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
			/*
			 * The SPU hardware increments the counter once for
			 * each AES block of 16 bytes. So update the counter
			 * for the next chunk, if there is one. Note that for
			 * this chunk, the counter has already been copied to
			 * local_iv_ctr. We can assume a block size of 16,
			 * because we only support CTR mode for AES, not for
			 * any other cipher alg.
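			 * For example, a 4096-byte chunk advances the
			 * counter by 4096 / 16 = 256.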
			 */
			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
		}
	}

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("sent:%u start:%u remains:%u size:%u\n",
		 rctx->src_sent, chunk_start, remaining, chunksize);

	/* Copy SPU header template created at setkey time */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));

	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
				   &cipher_parms, chunksize);

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
				     0, ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      ctx->spu_req_hdr_len);
	packet_log("payload:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		rx_frag_num++;	/* extra sg to insert tweak */

	err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
					stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		tx_frag_num++;	/* extra sg to insert tweak */

	err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
					pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

/**
 * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
 * total received count for the request and updates global stats.
 * @rctx: Crypto request context
 */
static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);

	/*
	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
	 * encrypted tweak ("i") value; we don't count those.
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload() &&
	    (payload_len >= SPU_XTS_TWEAK_SIZE))
		payload_len -= SPU_XTS_TWEAK_SIZE;

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	flow_log("%s() offset: %u, bd_len: %u BD:\n",
		 __func__, rctx->total_received, payload_len);

	dump_sg(req->dst, rctx->total_received, payload_len);

	rctx->total_received += payload_len;
	if (rctx->total_received == rctx->total_todo) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
		atomic_inc(
		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
	}
}

/**
 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ahash request.
 * @mssg: mailbox message containing the receive sg
 * @rctx: crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @digestsize: length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ahash_rx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 rx_frag_num, unsigned int digestsize,
		       u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* Space for digest */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
	return 0;
}

/**
 * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send
 * a SPU request message for an ahash request. Includes SPU message headers and
 * the request data.
 * @mssg: mailbox message containing the transmit sg
 * @rctx: crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length in bytes of SPU message header
 * @hash_carry_len: Number of bytes of data carried over from previous req
 * @new_data_len: Number of bytes of new request data
 * @pad_len: Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ahash_tx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 tx_frag_num,
		       u32 spu_hdr_len,
		       unsigned int hash_carry_len,
		       unsigned int new_data_len, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	u32 datalen;		/* Number of bytes of request data */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (hash_carry_len)
		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);

	if (new_data_len) {
		/* Copy in each src sg entry from request, up to chunksize */
		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, new_data_len);
		if (datalen < new_data_len) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}

	return 0;
}

/**
 * handle_ahash_req() - Process an asynchronous hash request from the crypto
 * API.
 * @rctx: Crypto request context
 *
 * Builds a SPU request message embedded in a mailbox message and submits the
 * mailbox message on a selected mailbox channel. The SPU request message is
 * constructed as a scatterlist, including entries from the crypto API's
 * src scatterlist to avoid copying the data to be hashed. This function is
 * called either on the thread from the crypto API, or, in the case that the
 * crypto API request is too large to fit in a single SPU request message,
 * on the thread that invokes the receive callback with a response message.
 * Because some operations require the response from one chunk before the next
 * chunk can be submitted, we always wait for the response for the previous
 * chunk before submitting the next chunk. Because requests are submitted in
 * lock step like this, there is no need to synchronize access to request data
 * structures.
 *
 * Return:
 *   -EINPROGRESS: request has been submitted to SPU and response will be
 *		   returned asynchronously
 *   -EAGAIN:      non-final request included a small amount of data, which for
 *		   efficiency we did not submit to the SPU, but instead stored
 *		   to be submitted to the SPU with the next part of the request
 *   other:        an error code
 */
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	/* number of bytes still to be hashed in this req */
	unsigned int nbytes_to_hash = 0;
	int err;
	unsigned int chunksize = 0;	/* length of hash carry + new data */
	/*
	 * length of new data, not from hash carry, to be submitted in
	 * this hw request
	 */
	unsigned int new_data_len;

	unsigned int __maybe_unused chunk_start = 0;
	u32 db_size;	/* Length of data field, incl gcm and hash padding */
	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
	 * rx always includes a buffer to catch digest and STATUS.
	 */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;
	/*
	 * For hash algorithms, the assignment below looks a bit odd, but it
	 * is needed for the AES-XCBC and AES-CMAC hash algorithms to
	 * differentiate between 128, 192, and 256 bit key values. The hash
	 * algorithm is selected based on the key size. For example, for a
	 * 128 bit key, the hash algorithm is AES-128.
	 */
	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;

	/*
	 * Compute the amount remaining to hash. This may include data
	 * carried over from previous requests.
	 */
	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * If this is not a final request and the request data is not a
	 * multiple of a full block, then simply park the extra data and
	 * prefix it to the data for the next request.
	 */
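	/*
	 * For example, with a 64-byte block size, a non-final update of 40
	 * bytes has no full block to submit; the 40 bytes are parked in
	 * hash_carry and -EAGAIN is returned.
	 */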
	if (!rctx->is_final) {
		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
		u16 new_len;	/* len of data to add to hash carry */

		rem = chunksize % blocksize;	/* remainder */
		if (rem) {
			/* chunksize not a multiple of blocksize */
			chunksize -= rem;
			if (chunksize == 0) {
				/* Don't have a full block to submit to hw */
				new_len = rem - rctx->hash_carry_len;
				sg_copy_part_to_buf(req->src, dest, new_len,
						    rctx->src_sent);
				rctx->hash_carry_len = rem;
				flow_log("Exiting with hash carry len: %u\n",
					 rctx->hash_carry_len);
				packet_dump(" buf: ",
					    rctx->hash_carry,
					    rctx->hash_carry_len);
				return -EAGAIN;
			}
		}
	}

	/* if we have hash carry, then prefix it to the data in this request */
	local_nbuf = rctx->hash_carry_len;
	rctx->hash_carry_len = 0;
	if (local_nbuf)
		tx_frag_num++;
	new_data_len = chunksize - local_nbuf;

	/* Count number of sg entries to be used in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
				       new_data_len);

	/* AES hashing keeps key size in type field, so need to copy it here */
	if (hash_parms.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)cipher_parms.type;
	else
		hash_parms.type = spu->spu_hash_type(rctx->total_sent);

	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
					  hash_parms.type);
	hash_parms.digestsize = digestsize;

	/* update the indexes */
	rctx->total_sent += chunksize;
	/* if you sent a prebuf then that wasn't from this req->src */
	rctx->src_sent += new_data_len;

	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
							   hash_parms.mode,
							   chunksize,
							   blocksize);

	/*
	 * If a non-first chunk, then include the digest returned from the
	 * previous chunk so that hw can add to it (except for AES types).
	 */
	if ((hash_parms.type == HASH_TYPE_UPDT) &&
	    (hash_parms.alg != HASH_ALG_AES)) {
		hash_parms.key_buf = rctx->incr_hash;
		hash_parms.key_len = digestsize;
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s() final: %u nbuf: %u ",
		 __func__, rctx->is_final, local_nbuf);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	hash_parms.prebuf_len = local_nbuf;
	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN,
					      &req_opts, &cipher_parms,
					      &hash_parms, &aead_parms,
					      new_data_len);

	if (spu_hdr_len == 0) {
		pr_err("Failed to create SPU request header\n");
		return -EFAULT;
	}

	/*
	 * Determine total length of padding required. Put all padding in one
	 * buffer.
	 */
	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
				   0, 0, hash_parms.pad_len);
	if (spu->spu_tx_status_len())
		stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
				     hash_parms.pad_len, ctx->auth.alg,
				     ctx->auth.mode, rctx->total_sent,
				     stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
	flow_log("Data:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

/**
 * spu_hmac_outer_hash() - Request synchronous software compute of the outer
 * hash for an HMAC request.
 * @req: The HMAC request from the crypto API
 * @ctx: The session context
 *
 * Return: 0 if synchronous hash operation successful
 *         -EINVAL if the hash algo is unrecognized
 *         any other value indicates an error
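 *
 * The outer hash hashes the stored opad block followed by the inner digest,
 * completing the HMAC construction H((K ^ opad) || H((K ^ ipad) || msg))
 * from RFC 2104.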
 */
static int spu_hmac_outer_hash(struct ahash_request *req,
			       struct iproc_ctx_s *ctx)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	int rc;

	switch (ctx->auth.alg) {
	case HASH_ALG_MD5:
		rc = do_shash("md5", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA1:
		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA224:
		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA256:
		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA384:
		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA512:
		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	default:
		pr_err("%s() Error : unknown hmac type\n", __func__);
		rc = -EINVAL;
	}
	return rc;
}

/**
 * ahash_req_done() - Process a hash result from the SPU hardware.
 * @rctx: Crypto request context
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		/* byte swap the output from the UPDT function to network byte
		 * order
		 */
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump(" digest ", req->result, ctx->digestsize);

	/* if this is an HMAC then do the outer hash */
	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump(" hmac: ", req->result, ctx->digestsize);
	}

	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}

/**
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Checks if the entire crypto API request has been processed, and if so,
 * invokes post processing on the result.
 * @rctx: Crypto request context
 */
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	/*
	 * Save hash to use as input to next op if incremental. Might be copying
	 * too much, but that's easier than figuring out actual digest size here
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}

/**
 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an AEAD request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg: mailbox message containing the receive sg
 * @req: Crypto API request
 * @rctx: crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @assoc_len: Length of associated data included in the crypto request
 * @ret_iv_len: Length of IV returned in response
 * @resp_len: Number of bytes of response data expected to be written to
 *	      dst buffer from crypto API
 * @digestsize: Length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int spu_aead_rx_sg_create(struct brcm_message *mssg,
				 struct aead_request *req,
				 struct iproc_reqctx_s *rctx,
				 u8 rx_frag_num,
				 unsigned int assoc_len,
				 u32 ret_iv_len, unsigned int resp_len,
				 unsigned int digestsize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */
	u32 assoc_buf_len;
	u8 data_padlen = 0;

	if (ctx->is_rfc4543) {
		/* RFC4543: only pad after data, not after AAD */
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       assoc_len + resp_len);
		assoc_buf_len = assoc_len;
	} else {
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       resp_len);
		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
							assoc_len, ret_iv_len,
							rctx->is_encrypt);
	}

	if (ctx->cipher.mode == CIPHER_MODE_CCM)
		/* ICV (after data) must be in the next 32-bit word for CCM */
		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
							 resp_len +
							 data_padlen);

	if (data_padlen)
		/* have to catch gcm pad in separate buffer */
		rx_frag_num++;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	if (assoc_buf_len) {
		/*
		 * Don't write directly to req->dst, because SPU may pad the
		 * assoc data in the response
		 */
		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
	}

	if (resp_len) {
		/*
		 * Copy in each dst sg entry from request, up to chunksize.
		 * dst sg catches just the data. digest caught in separate buf.
		 */
		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
					 rctx->dst_nents, resp_len);
		if (datalen < (resp_len)) {
			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
			       __func__, resp_len, datalen);
			return -EFAULT;
		}
	}

	/* If GCM/CCM data is padded, catch padding in separate buffer */
	if (data_padlen) {
		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
	}

	/* Always catch ICV in separate buffer */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	flow_log("stat_pad_len %u\n", stat_pad_len);
	if (stat_pad_len) {
		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
	}

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

/**
 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
 * SPU request message for an AEAD request. Includes SPU message headers and the
 * request data.
 * @mssg: mailbox message containing the transmit sg
 * @rctx: crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length of SPU message header in bytes
 * @assoc: crypto API associated data scatterlist
 * @assoc_len: length of associated data
 * @assoc_nents: number of scatterlist entries containing assoc data
 * @aead_iv_len: length of AEAD IV, if included
 * @chunksize: Number of bytes of request data
 * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
 * @pad_len: Number of pad bytes
 * @incl_icv: If true, write separate ICV buffer after data and
 *	      any padding
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int spu_aead_tx_sg_create(struct brcm_message *mssg,
				 struct iproc_reqctx_s *rctx,
				 u8 tx_frag_num,
				 u32 spu_hdr_len,
				 struct scatterlist *assoc,
				 unsigned int assoc_len,
				 int assoc_nents,
				 unsigned int aead_iv_len,
				 unsigned int chunksize,
				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct scatterlist *assoc_sg = assoc;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of data to write */
	u32 written;		/* Number of bytes of data written */
	u32 assoc_offset = 0;
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (assoc_len) {
		/* Copy in each associated data sg entry from request */
		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
					 assoc_nents, assoc_len);
		if (written < assoc_len) {
			pr_err("%s(): failed to copy assoc sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (aead_iv_len)
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);

	if (aad_pad_len) {
		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
	}

	datalen = chunksize;
	if ((chunksize > ctx->digestsize) && incl_icv)
		datalen -= ctx->digestsize;
	if (datalen) {
		/* For aead, a single msg should consume the entire src sg */
		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, datalen);
		if (written < datalen) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len) {
		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
	}

	if (incl_icv)
		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}

/**
 * handle_aead_req() - Submit a SPU request message for the next chunk of the
 * current AEAD request.
 * @rctx: Crypto request context
 *
 * Unlike other operation types, we assume the length of the request fits in
 * a single SPU request message. aead_enqueue() makes sure this is true.
 * Comments for other op types regarding threads apply here as well.
 *
 * Unlike incremental hash ops, where the spu returns the entire hash for
 * truncated algs like sha-224, the SPU returns just the truncated hash in
 * response to aead requests. So digestsize is always ctx->digestsize here.
 *
 * Return: -EINPROGRESS: crypto request has been accepted and result will be
 *			 returned asynchronously
 *         Any other value indicates an error
 */
static int handle_aead_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;
	unsigned int chunksize;
	unsigned int resp_len;
	u32 spu_hdr_len;
	u32 db_size;
	u32 stat_pad_len;
	u32 pad_len;
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	int assoc_nents = 0;
	bool incl_icv = false;
	unsigned int digestsize = ctx->digestsize;

	/* number of entries in src and dst sg. Always includes SPU msg header.
	 */
	u8 rx_frag_num = 2;	/* and STATUS */
	u8 tx_frag_num = 1;

	/* doing the whole thing at once */
	chunksize = rctx->total_todo;

	flow_log("%s: chunksize %u\n", __func__, chunksize);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.is_inbound = !(rctx->is_encrypt);
	req_opts.auth_first = ctx->auth_first;
	req_opts.is_aead = true;
	req_opts.is_esp = ctx->is_esp;

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;
	hash_parms.digestsize = digestsize;

	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
		hash_parms.key_len = SHA224_DIGEST_SIZE;

	aead_parms.assoc_size = req->assoclen;
	if (ctx->is_esp && !ctx->is_rfc4543) {
		/*
		 * The 8-byte IV is included in the assoc data in the request.
		 * SPU2 expects AAD to include just the SPI and seqno. So
		 * subtract off the IV len.
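		 * For example, an assoclen of 16 (4-byte SPI + 4-byte seqno +
		 * 8-byte IV) yields an assoc_size of 8.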
		 */
		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;

		if (rctx->is_encrypt) {
			aead_parms.return_iv = true;
			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
		}
	} else {
		aead_parms.ret_iv_len = 0;
	}

	/*
	 * Count number of sg entries from the crypto API request that are to
	 * be included in this mailbox message. For dst sg, don't count space
	 * for digest. Digest gets caught in a separate buffer and copied back
	 * to dst sg when processing response.
	 */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
	if (aead_parms.assoc_size)
		assoc_nents = spu_sg_count(rctx->assoc, 0,
					   aead_parms.assoc_size);

	mssg = &rctx->mb_mssg;

	rctx->total_sent = chunksize;
	rctx->src_sent = chunksize;
	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len,
				    rctx->is_encrypt))
		rx_frag_num++;

	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
						rctx->iv_ctr_len);

	if (ctx->auth.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)ctx->cipher_type;

	/* General case AAD padding (CCM and RFC4543 special cases below) */
	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							  aead_parms.assoc_size);

	/* General case data padding (CCM decrypt special case below) */
	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							   chunksize);

	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		/*
		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
		 * 128-bit aligned
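		 * (e.g. on SPU-M, an 8-byte AAD gets 6 pad bytes so that
		 * 8 + 2 lands on a 16-byte boundary)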
1377 | */ |
1378 | aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len( |
1379 | ctx->cipher.mode, |
1380 | aead_parms.assoc_size + 2); |
1381 | |
1382 | /* |
1383 | * And when decrypting CCM, need to pad without including |
1384 | * size of ICV which is tacked on to end of chunk |
1385 | */ |
1386 | if (!rctx->is_encrypt) |
1387 | aead_parms.data_pad_len = |
1388 | spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, |
1389 | chunksize - digestsize); |
1390 | |
1391 | /* CCM also requires software to rewrite portions of IV: */ |
1392 | spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen, |
1393 | chunksize, rctx->is_encrypt, |
1394 | ctx->is_esp); |
1395 | } |
1396 | |
1397 | if (ctx->is_rfc4543) { |
1398 | /* |
1399 | * RFC4543: data is included in AAD, so don't pad after AAD |
1400 | * and pad data based on both AAD + data size |
1401 | */ |
1402 | aead_parms.aad_pad_len = 0; |
1403 | if (!rctx->is_encrypt) |
1404 | aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len( |
1405 | ctx->cipher.mode, |
1406 | aead_parms.assoc_size + chunksize - |
1407 | digestsize); |
1408 | else |
1409 | aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len( |
1410 | ctx->cipher.mode, |
1411 | aead_parms.assoc_size + chunksize); |
1412 | |
1413 | req_opts.is_rfc4543 = true; |
1414 | } |
1415 | |
1416 | if (spu_req_incl_icv(cipher_mode: ctx->cipher.mode, is_encrypt: rctx->is_encrypt)) { |
1417 | incl_icv = true; |
1418 | tx_frag_num++; |
1419 | /* Copy ICV from end of src scatterlist to digest buf */ |
1420 | sg_copy_part_to_buf(src: req->src, dest: rctx->msg_buf.digest, len: digestsize, |
1421 | skip: req->assoclen + rctx->total_sent - |
1422 | digestsize); |
1423 | } |
1424 | |
1425 | atomic64_add(i: chunksize, v: &iproc_priv.bytes_out); |
1426 | |
1427 | flow_log(format: "%s()-sent chunksize:%u\n" , __func__, chunksize); |
1428 | |
1429 | /* Prepend SPU header with type 3 BCM header */ |
1430 | memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); |
1431 | |
1432 | spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr + |
1433 | BCM_HDR_LEN, &req_opts, |
1434 | &cipher_parms, &hash_parms, |
1435 | &aead_parms, chunksize); |
1436 | |
1437 | /* Determine total length of padding. Put all padding in one buffer. */ |
1438 | db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1439 | chunksize, aead_parms.aad_pad_len,
1440 | aead_parms.data_pad_len, 0);
1441 | |
1442 | stat_pad_len = spu->spu_wordalign_padlen(db_size); |
1443 | |
1444 | if (stat_pad_len) |
1445 | rx_frag_num++; |
1446 | pad_len = aead_parms.data_pad_len + stat_pad_len; |
1447 | if (pad_len) { |
1448 | tx_frag_num++; |
1449 | spu->spu_request_pad(rctx->msg_buf.spu_req_pad, |
1450 | aead_parms.data_pad_len, 0, |
1451 | ctx->auth.alg, ctx->auth.mode, |
1452 | rctx->total_sent, stat_pad_len); |
1453 | } |
1454 | |
1455 | spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, |
1456 | spu_hdr_len); |
1457 | dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1458 | packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1459 | packet_log("BD:\n");
1460 | dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1461 | packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1462 | |
1463 | /* |
1464 | * Build mailbox message containing SPU request msg and rx buffers |
1465 | * to catch response message |
1466 | */ |
1467 | memset(mssg, 0, sizeof(*mssg)); |
1468 | mssg->type = BRCM_MESSAGE_SPU; |
1469 | mssg->ctx = rctx; /* Will be returned in response */ |
1470 | |
1471 | /* Create rx scatterlist to catch result */ |
1472 | rx_frag_num += rctx->dst_nents; |
1473 | resp_len = chunksize; |
1474 | |
1475 | /* |
1476 | * Always catch ICV in separate buffer. Have to for GCM/CCM because of |
1477 | * padding. Have to for SHA-224 and other truncated SHAs because SPU |
1478 | * sends entire digest back. |
1479 | */ |
1480 | rx_frag_num++; |
1481 | |
1482 | if (((ctx->cipher.mode == CIPHER_MODE_GCM) || |
1483 | (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) { |
1484 | /* |
1485 | * Input is ciphertxt plus ICV, but ICV not incl |
1486 | * in output. |
1487 | */ |
1488 | resp_len -= ctx->digestsize; |
1489 | if (resp_len == 0) |
1490 | /* no rx frags to catch output data */ |
1491 | rx_frag_num -= rctx->dst_nents; |
1492 | } |
1493 | |
1494 | err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1495 | aead_parms.assoc_size,
1496 | aead_parms.ret_iv_len, resp_len, digestsize,
1497 | stat_pad_len);
1498 | if (err) |
1499 | return err; |
1500 | |
1501 | /* Create tx scatterlist containing SPU request message */ |
1502 | tx_frag_num += rctx->src_nents; |
1503 | tx_frag_num += assoc_nents; |
1504 | if (aead_parms.aad_pad_len) |
1505 | tx_frag_num++; |
1506 | if (aead_parms.iv_len) |
1507 | tx_frag_num++; |
1508 | if (spu->spu_tx_status_len()) |
1509 | tx_frag_num++; |
1510 | err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1511 | rctx->assoc, aead_parms.assoc_size,
1512 | assoc_nents, aead_parms.iv_len, chunksize,
1513 | aead_parms.aad_pad_len, pad_len, incl_icv);
1514 | if (err) |
1515 | return err; |
1516 | |
1517 | err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1518 | if (unlikely(err < 0)) |
1519 | return err; |
1520 | |
1521 | return -EINPROGRESS; |
1522 | } |
1523 | |
1524 | /** |
1525 | * handle_aead_resp() - Process a SPU response message for an AEAD request. |
1526 | * @rctx: Crypto request context |
1527 | */ |
1528 | static void handle_aead_resp(struct iproc_reqctx_s *rctx) |
1529 | { |
1530 | struct spu_hw *spu = &iproc_priv.spu; |
1531 | struct crypto_async_request *areq = rctx->parent; |
1532 | struct aead_request *req = container_of(areq, |
1533 | struct aead_request, base); |
1534 | struct iproc_ctx_s *ctx = rctx->ctx; |
1535 | u32 payload_len; |
1536 | unsigned int icv_offset; |
1537 | u32 result_len; |
1538 | |
1539 | /* See how much data was returned */ |
1540 | payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr); |
1541 | flow_log("payload_len %u\n", payload_len);
1542 | 
1543 | /* only count payload */
1544 | atomic64_add(payload_len, &iproc_priv.bytes_in);
1545 | |
1546 | if (req->assoclen) |
1547 | packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1548 | req->assoclen);
1549 | |
1550 | /* |
1551 | * Copy the ICV back to the destination |
1552 | * buffer. In decrypt case, SPU gives us back the digest, but crypto |
1553 | * API doesn't expect ICV in dst buffer. |
1554 | */ |
1555 | result_len = req->cryptlen; |
1556 | if (rctx->is_encrypt) { |
1557 | icv_offset = req->assoclen + rctx->total_sent; |
1558 | packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1559 | flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1560 | sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1561 | ctx->digestsize, icv_offset);
1562 | result_len += ctx->digestsize; |
1563 | } |
1564 | |
1565 | packet_log("response data: ");
1566 | dump_sg(req->dst, req->assoclen, result_len);
1567 | |
1568 | atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1569 | if (ctx->cipher.alg == CIPHER_ALG_AES) {
1570 | if (ctx->cipher.mode == CIPHER_MODE_CCM)
1571 | atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1572 | else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1573 | atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1574 | else
1575 | atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1576 | } else {
1577 | atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1578 | } |
1579 | } |
1580 | |
1581 | /** |
1582 | * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request |
1583 | * @rctx: request context |
1584 | * |
1585 | * Mailbox scatterlists are allocated for each chunk. So free them after |
1586 | * processing each chunk. |
1587 | */ |
1588 | static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx) |
1589 | { |
1590 | /* mailbox message used to tx request */ |
1591 | struct brcm_message *mssg = &rctx->mb_mssg; |
1592 | |
1593 | kfree(mssg->spu.src);
1594 | kfree(mssg->spu.dst);
1595 | memset(mssg, 0, sizeof(struct brcm_message)); |
1596 | } |
1597 | |
1598 | /** |
1599 | * finish_req() - Used to invoke the complete callback from the requester when |
1600 | * a request has been handled asynchronously. |
1601 | * @rctx: Request context |
1602 | * @err: Indicates whether the request was successful or not |
1603 | * |
1604 | * Ensures that cleanup has been done for request |
1605 | */ |
1606 | static void finish_req(struct iproc_reqctx_s *rctx, int err) |
1607 | { |
1608 | struct crypto_async_request *areq = rctx->parent; |
1609 | |
1610 | flow_log("%s() err:%d\n\n", __func__, err);
1611 | |
1612 | /* No harm done if already called */ |
1613 | spu_chunk_cleanup(rctx); |
1614 | |
1615 | if (areq) |
1616 | crypto_request_complete(areq, err);
1617 | } |
1618 | |
1619 | /** |
1620 | * spu_rx_callback() - Callback from mailbox framework with a SPU response. |
1621 | * @cl: mailbox client structure for SPU driver |
1622 | * @msg: mailbox message containing SPU response |
1623 | */ |
1624 | static void spu_rx_callback(struct mbox_client *cl, void *msg) |
1625 | { |
1626 | struct spu_hw *spu = &iproc_priv.spu; |
1627 | struct brcm_message *mssg = msg; |
1628 | struct iproc_reqctx_s *rctx; |
1629 | int err; |
1630 | |
1631 | rctx = mssg->ctx; |
1632 | if (unlikely(!rctx)) { |
1633 | /* This is fatal */ |
1634 | pr_err("%s(): no request context", __func__);
1635 | err = -EFAULT; |
1636 | goto cb_finish; |
1637 | } |
1638 | |
1639 | /* process the SPU status */ |
1640 | err = spu->spu_status_process(rctx->msg_buf.rx_stat); |
1641 | if (err != 0) { |
1642 | if (err == SPU_INVALID_ICV) |
1643 | atomic_inc(&iproc_priv.bad_icv);
1644 | err = -EBADMSG; |
1645 | goto cb_finish; |
1646 | } |
1647 | |
1648 | /* Process the SPU response message */ |
1649 | switch (rctx->ctx->alg->type) { |
1650 | case CRYPTO_ALG_TYPE_SKCIPHER: |
1651 | handle_skcipher_resp(rctx); |
1652 | break; |
1653 | case CRYPTO_ALG_TYPE_AHASH: |
1654 | handle_ahash_resp(rctx); |
1655 | break; |
1656 | case CRYPTO_ALG_TYPE_AEAD: |
1657 | handle_aead_resp(rctx); |
1658 | break; |
1659 | default: |
1660 | err = -EINVAL; |
1661 | goto cb_finish; |
1662 | } |
1663 | |
1664 | /* |
1665 | * If this response does not complete the request, then send the next |
1666 | * request chunk. |
1667 | */ |
1668 | if (rctx->total_sent < rctx->total_todo) { |
1669 | /* Deallocate anything specific to previous chunk */ |
1670 | spu_chunk_cleanup(rctx); |
1671 | |
1672 | switch (rctx->ctx->alg->type) { |
1673 | case CRYPTO_ALG_TYPE_SKCIPHER: |
1674 | err = handle_skcipher_req(rctx); |
1675 | break; |
1676 | case CRYPTO_ALG_TYPE_AHASH: |
1677 | err = handle_ahash_req(rctx); |
1678 | if (err == -EAGAIN) |
1679 | /* |
1680 | * we saved data in hash carry, but tell crypto |
1681 | * API we successfully completed request. |
1682 | */ |
1683 | err = 0; |
1684 | break; |
1685 | case CRYPTO_ALG_TYPE_AEAD: |
1686 | err = handle_aead_req(rctx); |
1687 | break; |
1688 | default: |
1689 | err = -EINVAL; |
1690 | } |
1691 | |
1692 | if (err == -EINPROGRESS) |
1693 | /* Successfully submitted request for next chunk */ |
1694 | return; |
1695 | } |
1696 | |
1697 | cb_finish: |
1698 | finish_req(rctx, err); |
1699 | } |
1700 | |
1701 | /* ==================== Kernel Cryptographic API ==================== */ |
1702 | |
1703 | /** |
1704 | * skcipher_enqueue() - Handle skcipher encrypt or decrypt request. |
1705 | * @req: Crypto API request |
1706 | * @encrypt: true if encrypting; false if decrypting |
1707 | * |
1708 | * Return: -EINPROGRESS if request accepted and result will be returned |
1709 | * asynchronously |
1710 | * < 0 if an error |
1711 | */ |
1712 | static int skcipher_enqueue(struct skcipher_request *req, bool encrypt) |
1713 | { |
1714 | struct iproc_reqctx_s *rctx = skcipher_request_ctx(req); |
1715 | struct iproc_ctx_s *ctx = |
1716 | crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1717 | int err; |
1718 | |
1719 | flow_log("%s() enc:%u\n", __func__, encrypt);
1720 | |
1721 | rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
1722 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
1723 | rctx->parent = &req->base; |
1724 | rctx->is_encrypt = encrypt; |
1725 | rctx->bd_suppress = false; |
1726 | rctx->total_todo = req->cryptlen; |
1727 | rctx->src_sent = 0; |
1728 | rctx->total_sent = 0; |
1729 | rctx->total_received = 0; |
1730 | rctx->ctx = ctx; |
1731 | |
1732 | /* Initialize current position in src and dst scatterlists */ |
1733 | rctx->src_sg = req->src; |
1734 | rctx->src_nents = 0; |
1735 | rctx->src_skip = 0; |
1736 | rctx->dst_sg = req->dst; |
1737 | rctx->dst_nents = 0; |
1738 | rctx->dst_skip = 0; |
1739 | |
1740 | if (ctx->cipher.mode == CIPHER_MODE_CBC || |
1741 | ctx->cipher.mode == CIPHER_MODE_CTR || |
1742 | ctx->cipher.mode == CIPHER_MODE_OFB || |
1743 | ctx->cipher.mode == CIPHER_MODE_XTS || |
1744 | ctx->cipher.mode == CIPHER_MODE_GCM || |
1745 | ctx->cipher.mode == CIPHER_MODE_CCM) { |
1746 | rctx->iv_ctr_len = |
1747 | crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
1748 | memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len); |
1749 | } else { |
1750 | rctx->iv_ctr_len = 0; |
1751 | } |
1752 | |
1753 | /* Choose a SPU to process this request */ |
1754 | rctx->chan_idx = select_channel(); |
1755 | err = handle_skcipher_req(rctx); |
1756 | if (err != -EINPROGRESS) |
1757 | /* synchronous result */ |
1758 | spu_chunk_cleanup(rctx); |
1759 | |
1760 | return err; |
1761 | } |
1762 | |
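/**
 * des_setkey() - Validate a DES key and record the cipher type.
 * @cipher: skcipher transform for this session
 * @key:    DES key bytes
 * @keylen: length of key, in bytes
 *
 * The key bytes themselves are copied by the caller, skcipher_setkey().
 *
 * Return: 0 if the key is acceptable, else -error
 */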
1763 | static int des_setkey(struct crypto_skcipher *cipher, const u8 *key, |
1764 | unsigned int keylen) |
1765 | { |
1766 | struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1767 | int err;
1768 | 
1769 | err = verify_skcipher_des_key(cipher, key);
1770 | if (err) |
1771 | return err; |
1772 | |
1773 | ctx->cipher_type = CIPHER_TYPE_DES; |
1774 | return 0; |
1775 | } |
1776 | |
1777 | static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key, |
1778 | unsigned int keylen) |
1779 | { |
1780 | struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1781 | int err;
1782 | 
1783 | err = verify_skcipher_des3_key(cipher, key);
1784 | if (err) |
1785 | return err; |
1786 | |
1787 | ctx->cipher_type = CIPHER_TYPE_3DES; |
1788 | return 0; |
1789 | } |
1790 | |
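/**
 * aes_setkey() - Map an AES key length to a SPU cipher type.
 * @cipher: skcipher transform for this session
 * @key:    AES key bytes
 * @keylen: length of key, in bytes
 *
 * For XTS mode the crypto API hands in two concatenated keys of equal
 * length, so only half of keylen selects the AES variant.
 *
 * Return: 0 on success, -EINVAL for an unsupported key length
 */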
1791 | static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key, |
1792 | unsigned int keylen) |
1793 | { |
1794 | struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1795 | |
1796 | if (ctx->cipher.mode == CIPHER_MODE_XTS) |
1797 | /* XTS includes two keys of equal length */ |
1798 | keylen = keylen / 2; |
1799 | |
1800 | switch (keylen) { |
1801 | case AES_KEYSIZE_128: |
1802 | ctx->cipher_type = CIPHER_TYPE_AES128; |
1803 | break; |
1804 | case AES_KEYSIZE_192: |
1805 | ctx->cipher_type = CIPHER_TYPE_AES192; |
1806 | break; |
1807 | case AES_KEYSIZE_256: |
1808 | ctx->cipher_type = CIPHER_TYPE_AES256; |
1809 | break; |
1810 | default: |
1811 | return -EINVAL; |
1812 | } |
1813 | WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && |
1814 | ((ctx->max_payload % AES_BLOCK_SIZE) != 0)); |
1815 | return 0; |
1816 | } |
1817 | |
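/**
 * skcipher_setkey() - Program a symmetric cipher session.
 * @cipher: skcipher transform for this session
 * @key:    key bytes from the crypto API
 * @keylen: length of key, in bytes
 *
 * Validates and stores the key (swapping the two XTS half-keys into the
 * order the SPU expects), then pre-builds the constant part of the SPU
 * request header so per-request processing only fills in lengths and IV.
 *
 * Return: 0 on success, else -error
 */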
1818 | static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key, |
1819 | unsigned int keylen) |
1820 | { |
1821 | struct spu_hw *spu = &iproc_priv.spu; |
1822 | struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1823 | struct spu_cipher_parms cipher_parms; |
1824 | u32 alloc_len = 0; |
1825 | int err; |
1826 | |
1827 | flow_log("skcipher_setkey() keylen: %d\n", keylen);
1828 | flow_dump(" key: ", key, keylen);
1829 | |
1830 | switch (ctx->cipher.alg) { |
1831 | case CIPHER_ALG_DES: |
1832 | err = des_setkey(cipher, key, keylen); |
1833 | break; |
1834 | case CIPHER_ALG_3DES: |
1835 | err = threedes_setkey(cipher, key, keylen); |
1836 | break; |
1837 | case CIPHER_ALG_AES: |
1838 | err = aes_setkey(cipher, key, keylen); |
1839 | break; |
1840 | default: |
1841 | pr_err("%s() Error: unknown cipher alg\n", __func__);
1842 | err = -EINVAL; |
1843 | } |
1844 | if (err) |
1845 | return err; |
1846 | |
1847 | memcpy(ctx->enckey, key, keylen); |
1848 | ctx->enckeylen = keylen; |
1849 | |
1850 | /* SPU needs XTS keys in the reverse order the crypto API presents */ |
1851 | if ((ctx->cipher.alg == CIPHER_ALG_AES) && |
1852 | (ctx->cipher.mode == CIPHER_MODE_XTS)) { |
1853 | unsigned int xts_keylen = keylen / 2; |
1854 | |
1855 | memcpy(ctx->enckey, key + xts_keylen, xts_keylen); |
1856 | memcpy(ctx->enckey + xts_keylen, key, xts_keylen); |
1857 | } |
1858 | |
1859 | if (spu->spu_type == SPU_TYPE_SPUM) |
1860 | alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN; |
1861 | else if (spu->spu_type == SPU_TYPE_SPU2) |
1862 | alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN; |
1863 | memset(ctx->bcm_spu_req_hdr, 0, alloc_len); |
1864 | cipher_parms.iv_buf = NULL; |
1865 | cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
1866 | flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1867 | |
1868 | cipher_parms.alg = ctx->cipher.alg; |
1869 | cipher_parms.mode = ctx->cipher.mode; |
1870 | cipher_parms.type = ctx->cipher_type; |
1871 | cipher_parms.key_buf = ctx->enckey; |
1872 | cipher_parms.key_len = ctx->enckeylen; |
1873 | |
1874 | /* Prepend SPU request message with BCM header */ |
1875 | memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); |
1876 | ctx->spu_req_hdr_len = |
1877 | spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN, |
1878 | &cipher_parms); |
1879 | |
1880 | ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, |
1881 | ctx->enckeylen, |
1882 | false); |
1883 | |
1884 | atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1885 | |
1886 | return 0; |
1887 | } |
1888 | |
1889 | static int skcipher_encrypt(struct skcipher_request *req) |
1890 | { |
1891 | flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
1892 | 
1893 | return skcipher_enqueue(req, true);
1894 | } |
1895 | |
1896 | static int skcipher_decrypt(struct skcipher_request *req) |
1897 | { |
1898 | flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
1899 | return skcipher_enqueue(req, false);
1900 | } |
1901 | |
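/**
 * ahash_enqueue() - Submit an ahash request to a SPU channel.
 * @req: Crypto API hash request
 *
 * A final hash of zero-length data is computed synchronously in software,
 * since SPU2 hardware cannot hash an empty message. -EAGAIN from
 * handle_ahash_req() means the data was merely buffered in the hash carry
 * and is reported to the crypto API as success.
 *
 * Return: 0 or -EINPROGRESS on success, else -error
 */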
1902 | static int ahash_enqueue(struct ahash_request *req) |
1903 | { |
1904 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
1905 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
1906 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
1907 | int err; |
1908 | const char *alg_name; |
1909 | |
1910 | flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1911 | |
1912 | rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
1913 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
1914 | rctx->parent = &req->base; |
1915 | rctx->ctx = ctx; |
1916 | rctx->bd_suppress = true; |
1917 | memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); |
1918 | |
1919 | /* Initialize position in src scatterlist */ |
1920 | rctx->src_sg = req->src; |
1921 | rctx->src_skip = 0; |
1922 | rctx->src_nents = 0; |
1923 | rctx->dst_sg = NULL; |
1924 | rctx->dst_skip = 0; |
1925 | rctx->dst_nents = 0; |
1926 | |
1927 | /* SPU2 hardware does not compute hash of zero length data */ |
1928 | if ((rctx->is_final == 1) && (rctx->total_todo == 0) && |
1929 | (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) { |
1930 | alg_name = crypto_ahash_alg_name(tfm); |
1931 | flow_log("Doing %sfinal %s zero-len hash request in software\n",
1932 | rctx->is_final ? "" : "non-", alg_name);
1933 | err = do_shash((unsigned char *)alg_name, req->result,
1934 | NULL, 0, NULL, 0, ctx->authkey,
1935 | ctx->authkeylen);
1936 | if (err < 0)
1937 | flow_log("Hash request failed with error %d\n", err);
1938 | return err; |
1939 | } |
1940 | /* Choose a SPU to process this request */ |
1941 | rctx->chan_idx = select_channel(); |
1942 | |
1943 | err = handle_ahash_req(rctx); |
1944 | if (err != -EINPROGRESS) |
1945 | /* synchronous result */ |
1946 | spu_chunk_cleanup(rctx); |
1947 | |
1948 | if (err == -EAGAIN) |
1949 | /* |
1950 | * we saved data in hash carry, but tell crypto API |
1951 | * we successfully completed request. |
1952 | */ |
1953 | err = 0; |
1954 | |
1955 | return err; |
1956 | } |
1957 | |
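/**
 * __ahash_init() - Initialize SPU hash request state.
 * @req: Crypto API hash request
 *
 * Resets the hash carry and byte counters and records the digest size
 * before the first chunk of a hash request is sent to the SPU.
 *
 * Return: 0 always
 */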
1958 | static int __ahash_init(struct ahash_request *req) |
1959 | { |
1960 | struct spu_hw *spu = &iproc_priv.spu; |
1961 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
1962 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
1963 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
1964 | |
1965 | flow_log("%s()\n", __func__);
1966 | |
1967 | /* Initialize the context */ |
1968 | rctx->hash_carry_len = 0; |
1969 | rctx->is_final = 0; |
1970 | |
1971 | rctx->total_todo = 0; |
1972 | rctx->src_sent = 0; |
1973 | rctx->total_sent = 0; |
1974 | rctx->total_received = 0; |
1975 | |
1976 | ctx->digestsize = crypto_ahash_digestsize(tfm); |
1977 | /* If we add a hash whose digest is larger, catch it here. */ |
1978 | WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE); |
1979 | |
1980 | rctx->is_sw_hmac = false; |
1981 | |
1982 | ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0, |
1983 | true); |
1984 | |
1985 | return 0; |
1986 | } |
1987 | |
1988 | /** |
1989 | * spu_no_incr_hash() - Determine whether incremental hashing is supported. |
1990 | * @ctx: Crypto session context |
1991 | * |
1992 | * SPU-2 does not support incremental hashing (we'll have to revisit and |
1993 | * condition based on chip revision or device tree entry if future versions do |
1994 | * support incremental hash) |
1995 | * |
1996 | * SPU-M also doesn't support incremental hashing of AES-XCBC |
1997 | * |
1998 | * Return: true if incremental hashing is not supported |
1999 | * false otherwise |
2000 | */ |
2001 | static bool spu_no_incr_hash(struct iproc_ctx_s *ctx) |
2002 | { |
2003 | struct spu_hw *spu = &iproc_priv.spu; |
2004 | |
2005 | if (spu->spu_type == SPU_TYPE_SPU2) |
2006 | return true; |
2007 | |
2008 | if ((ctx->auth.alg == HASH_ALG_AES) && |
2009 | (ctx->auth.mode == HASH_MODE_XCBC)) |
2010 | return true; |
2011 | |
2012 | /* Otherwise, incremental hashing is supported */ |
2013 | return false; |
2014 | } |
2015 | |
2016 | static int ahash_init(struct ahash_request *req) |
2017 | { |
2018 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
2019 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
2020 | const char *alg_name; |
2021 | struct crypto_shash *hash; |
2022 | int ret; |
2023 | gfp_t gfp; |
2024 | |
2025 | if (spu_no_incr_hash(ctx)) { |
2026 | /* |
2027 | * If we get an incremental hashing request and it's not |
2028 | * supported by the hardware, we need to handle it in software |
2029 | * by calling synchronous hash functions. |
2030 | */ |
2031 | alg_name = crypto_ahash_alg_name(tfm); |
2032 | hash = crypto_alloc_shash(alg_name, 0, 0);
2033 | if (IS_ERR(hash)) {
2034 | ret = PTR_ERR(hash);
2035 | goto err; |
2036 | } |
2037 | |
2038 | gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
2039 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
2040 | ctx->shash = kmalloc(sizeof(*ctx->shash) +
2041 | crypto_shash_descsize(hash), gfp);
2042 | if (!ctx->shash) { |
2043 | ret = -ENOMEM; |
2044 | goto err_hash; |
2045 | } |
2046 | ctx->shash->tfm = hash; |
2047 | |
2048 | /* Set the key using data we already have from setkey */ |
2049 | if (ctx->authkeylen > 0) { |
2050 | ret = crypto_shash_setkey(hash, ctx->authkey,
2051 | ctx->authkeylen);
2052 | if (ret) |
2053 | goto err_shash; |
2054 | } |
2055 | |
2056 | /* Initialize hash w/ this key and other params */ |
2057 | ret = crypto_shash_init(ctx->shash);
2058 | if (ret) |
2059 | goto err_shash; |
2060 | } else { |
2061 | /* Otherwise call the internal function which uses SPU hw */ |
2062 | ret = __ahash_init(req); |
2063 | } |
2064 | |
2065 | return ret; |
2066 | |
2067 | err_shash: |
2068 | kfree(ctx->shash);
2069 | err_hash:
2070 | crypto_free_shash(hash);
2071 | err: |
2072 | return ret; |
2073 | } |
2074 | |
2075 | static int __ahash_update(struct ahash_request *req) |
2076 | { |
2077 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
2078 | |
2079 | flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2080 | |
2081 | if (!req->nbytes) |
2082 | return 0; |
2083 | rctx->total_todo += req->nbytes; |
2084 | rctx->src_sent = 0; |
2085 | |
2086 | return ahash_enqueue(req); |
2087 | } |
2088 | |
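/**
 * ahash_update() - Handle a hash update, in hardware or software.
 * @req: Crypto API hash request
 *
 * When the hardware cannot hash incrementally for this algorithm, the src
 * scatterlist is flattened into a temporary buffer and fed to the shash
 * allocated in ahash_init(); otherwise the update is sent to the SPU.
 *
 * Return: 0 on success, else -error
 */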
2089 | static int ahash_update(struct ahash_request *req) |
2090 | { |
2091 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
2092 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
2093 | u8 *tmpbuf; |
2094 | int ret; |
2095 | int nents; |
2096 | gfp_t gfp; |
2097 | |
2098 | if (spu_no_incr_hash(ctx)) { |
2099 | /* |
2100 | * If we get an incremental hashing request and it's not |
2101 | * supported by the hardware, we need to handle it in software |
2102 | * by calling synchronous hash functions. |
2103 | */ |
2104 | if (req->src) |
2105 | nents = sg_nents(req->src);
2106 | else |
2107 | return -EINVAL; |
2108 | |
2109 | /* Copy data from req scatterlist to tmp buffer */ |
2110 | gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
2111 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
2112 | tmpbuf = kmalloc(req->nbytes, gfp);
2113 | if (!tmpbuf) |
2114 | return -ENOMEM; |
2115 | |
2116 | if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2117 | req->nbytes) {
2118 | kfree(tmpbuf);
2119 | return -EINVAL; |
2120 | } |
2121 | |
2122 | /* Call synchronous update */ |
2123 | ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2124 | kfree(tmpbuf);
2125 | } else { |
2126 | /* Otherwise call the internal function which uses SPU hw */ |
2127 | ret = __ahash_update(req); |
2128 | } |
2129 | |
2130 | return ret; |
2131 | } |
2132 | |
2133 | static int __ahash_final(struct ahash_request *req) |
2134 | { |
2135 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
2136 | |
2137 | flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2138 | |
2139 | rctx->is_final = 1; |
2140 | |
2141 | return ahash_enqueue(req); |
2142 | } |
2143 | |
2144 | static int ahash_final(struct ahash_request *req) |
2145 | { |
2146 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
2147 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
2148 | int ret; |
2149 | |
2150 | if (spu_no_incr_hash(ctx)) { |
2151 | /* |
2152 | * If we get an incremental hashing request and it's not |
2153 | * supported by the hardware, we need to handle it in software |
2154 | * by calling synchronous hash functions. |
2155 | */ |
2156 | ret = crypto_shash_final(ctx->shash, req->result);
2157 | 
2158 | /* Done with hash, can deallocate it now */
2159 | crypto_free_shash(ctx->shash->tfm);
2160 | kfree(ctx->shash);
2161 | |
2162 | } else { |
2163 | /* Otherwise call the internal function which uses SPU hw */ |
2164 | ret = __ahash_final(req); |
2165 | } |
2166 | |
2167 | return ret; |
2168 | } |
2169 | |
2170 | static int __ahash_finup(struct ahash_request *req) |
2171 | { |
2172 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
2173 | |
2174 | flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2175 | |
2176 | rctx->total_todo += req->nbytes; |
2177 | rctx->src_sent = 0; |
2178 | rctx->is_final = 1; |
2179 | |
2180 | return ahash_enqueue(req); |
2181 | } |
2182 | |
2183 | static int ahash_finup(struct ahash_request *req) |
2184 | { |
2185 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
2186 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
2187 | u8 *tmpbuf; |
2188 | int ret; |
2189 | int nents; |
2190 | gfp_t gfp; |
2191 | |
2192 | if (spu_no_incr_hash(ctx)) { |
2193 | /* |
2194 | * If we get an incremental hashing request and it's not |
2195 | * supported by the hardware, we need to handle it in software |
2196 | * by calling synchronous hash functions. |
2197 | */ |
2198 | if (req->src) { |
2199 | nents = sg_nents(req->src);
2200 | } else { |
2201 | ret = -EINVAL; |
2202 | goto ahash_finup_exit; |
2203 | } |
2204 | |
2205 | /* Copy data from req scatterlist to tmp buffer */ |
2206 | gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
2207 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
2208 | tmpbuf = kmalloc(req->nbytes, gfp);
2209 | if (!tmpbuf) { |
2210 | ret = -ENOMEM; |
2211 | goto ahash_finup_exit; |
2212 | } |
2213 | |
2214 | if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2215 | req->nbytes) { |
2216 | ret = -EINVAL; |
2217 | goto ahash_finup_free; |
2218 | } |
2219 | |
2220 | /* Call synchronous update */ |
2221 | ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2222 | req->result);
2223 | } else { |
2224 | /* Otherwise call the internal function which uses SPU hw */ |
2225 | return __ahash_finup(req); |
2226 | } |
2227 | ahash_finup_free: |
2228 | kfree(tmpbuf);
2229 | |
2230 | ahash_finup_exit: |
2231 | /* Done with hash, can deallocate it now */ |
2232 | crypto_free_shash(ctx->shash->tfm);
2233 | kfree(ctx->shash);
2234 | return ret; |
2235 | } |
2236 | |
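/**
 * ahash_digest() - Handle a one-shot digest request.
 * @req: Crypto API hash request
 *
 * Implemented as an init followed immediately by a finup over the whole
 * message.
 */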
2237 | static int ahash_digest(struct ahash_request *req) |
2238 | { |
2239 | int err; |
2240 | |
2241 | flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2242 | |
2243 | /* whole thing at once */ |
2244 | err = __ahash_init(req); |
2245 | if (!err) |
2246 | err = __ahash_finup(req); |
2247 | |
2248 | return err; |
2249 | } |
2250 | |
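/**
 * ahash_setkey() - Set the key for a keyed (non-HMAC) hash.
 * @ahash:  hash transform for this session
 * @key:    key bytes
 * @keylen: length of key, in bytes
 *
 * Only AES-based keyed hashes (such as XCBC/CMAC) take a key here; the
 * key length selects the AES cipher type.
 *
 * Return: 0 on success, -EINVAL for an unknown alg or key length
 */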
2251 | static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, |
2252 | unsigned int keylen) |
2253 | { |
2254 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2255 | 
2256 | flow_log("%s() ahash:%p key:%p keylen:%u\n",
2257 | __func__, ahash, key, keylen);
2258 | flow_dump(" key: ", key, keylen);
2259 | |
2260 | if (ctx->auth.alg == HASH_ALG_AES) { |
2261 | switch (keylen) { |
2262 | case AES_KEYSIZE_128: |
2263 | ctx->cipher_type = CIPHER_TYPE_AES128; |
2264 | break; |
2265 | case AES_KEYSIZE_192: |
2266 | ctx->cipher_type = CIPHER_TYPE_AES192; |
2267 | break; |
2268 | case AES_KEYSIZE_256: |
2269 | ctx->cipher_type = CIPHER_TYPE_AES256; |
2270 | break; |
2271 | default: |
2272 | pr_err("%s() Error: Invalid key length\n", __func__);
2273 | return -EINVAL; |
2274 | } |
2275 | } else { |
2276 | pr_err("%s() Error: unknown hash alg\n", __func__);
2277 | return -EINVAL; |
2278 | } |
2279 | memcpy(ctx->authkey, key, keylen); |
2280 | ctx->authkeylen = keylen; |
2281 | |
2282 | return 0; |
2283 | } |
2284 | |
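/**
 * ahash_export() - Serialize hash state so a request can be suspended.
 * @req: Crypto API hash request
 * @out: buffer receiving a struct spu_hash_export_s
 *
 * Together with ahash_import(), this lets a partially hashed request be
 * saved and later resumed on another request context.
 */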
2285 | static int ahash_export(struct ahash_request *req, void *out) |
2286 | { |
2287 | const struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
2288 | struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out; |
2289 | |
2290 | spu_exp->total_todo = rctx->total_todo; |
2291 | spu_exp->total_sent = rctx->total_sent; |
2292 | spu_exp->is_sw_hmac = rctx->is_sw_hmac; |
2293 | memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry)); |
2294 | spu_exp->hash_carry_len = rctx->hash_carry_len; |
2295 | memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash)); |
2296 | |
2297 | return 0; |
2298 | } |
2299 | |
2300 | static int ahash_import(struct ahash_request *req, const void *in) |
2301 | { |
2302 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
2303 | struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in; |
2304 | |
2305 | rctx->total_todo = spu_exp->total_todo; |
2306 | rctx->total_sent = spu_exp->total_sent; |
2307 | rctx->is_sw_hmac = spu_exp->is_sw_hmac; |
2308 | memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry)); |
2309 | rctx->hash_carry_len = spu_exp->hash_carry_len; |
2310 | memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash)); |
2311 | |
2312 | return 0; |
2313 | } |
2314 | |
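/**
 * ahash_hmac_setkey() - Prepare an HMAC key.
 * @ahash:  hash transform for this session
 * @key:    HMAC key bytes
 * @keylen: length of key, in bytes
 *
 * Keys longer than the hash block size are first hashed down to digest
 * size, per the HMAC definition. On SPU-M, the ipad and opad blocks are
 * precomputed here because the outer hashing is done in software.
 *
 * Return: 0 on success, else -error
 */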
2315 | static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, |
2316 | unsigned int keylen) |
2317 | { |
2318 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2319 | unsigned int blocksize =
2320 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2321 | unsigned int digestsize = crypto_ahash_digestsize(ahash);
2322 | unsigned int index; |
2323 | int rc; |
2324 | |
2325 | flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2326 | __func__, ahash, key, keylen, blocksize, digestsize);
2327 | flow_dump(" key: ", key, keylen);
2328 | |
2329 | if (keylen > blocksize) { |
2330 | switch (ctx->auth.alg) { |
2331 | case HASH_ALG_MD5: |
2332 | rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2333 | 0, NULL, 0);
2334 | break;
2335 | case HASH_ALG_SHA1:
2336 | rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2337 | 0, NULL, 0);
2338 | break;
2339 | case HASH_ALG_SHA224:
2340 | rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2341 | 0, NULL, 0);
2342 | break;
2343 | case HASH_ALG_SHA256:
2344 | rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2345 | 0, NULL, 0);
2346 | break;
2347 | case HASH_ALG_SHA384:
2348 | rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2349 | 0, NULL, 0);
2350 | break;
2351 | case HASH_ALG_SHA512:
2352 | rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2353 | 0, NULL, 0);
2354 | break;
2355 | case HASH_ALG_SHA3_224:
2356 | rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2357 | NULL, 0, NULL, 0);
2358 | break;
2359 | case HASH_ALG_SHA3_256:
2360 | rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2361 | NULL, 0, NULL, 0);
2362 | break;
2363 | case HASH_ALG_SHA3_384:
2364 | rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2365 | NULL, 0, NULL, 0);
2366 | break;
2367 | case HASH_ALG_SHA3_512:
2368 | rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2369 | NULL, 0, NULL, 0);
2370 | break;
2371 | default:
2372 | pr_err("%s() Error: unknown hash alg\n", __func__);
2373 | return -EINVAL; |
2374 | } |
2375 | if (rc < 0) { |
2376 | pr_err("%s() Error %d computing shash for %s\n",
2377 | __func__, rc, hash_alg_name[ctx->auth.alg]); |
2378 | return rc; |
2379 | } |
2380 | ctx->authkeylen = digestsize; |
2381 | |
2382 | flow_log(" keylen > digestsize... hashed\n");
2383 | flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2384 | } else { |
2385 | memcpy(ctx->authkey, key, keylen); |
2386 | ctx->authkeylen = keylen; |
2387 | } |
2388 | |
2389 | /* |
2390 | * Full HMAC operation in SPUM is not verified, |
2391 | * So keeping the generation of IPAD, OPAD and |
2392 | * outer hashing in software. |
2393 | */ |
2394 | if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) { |
2395 | memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen); |
2396 | memset(ctx->ipad + ctx->authkeylen, 0, |
2397 | blocksize - ctx->authkeylen); |
2398 | ctx->authkeylen = 0; |
2399 | unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
2400 | "fortified memcpy causes -Wrestrict warning");
2401 | |
2402 | for (index = 0; index < blocksize; index++) { |
2403 | ctx->ipad[index] ^= HMAC_IPAD_VALUE; |
2404 | ctx->opad[index] ^= HMAC_OPAD_VALUE; |
2405 | } |
2406 | |
2407 | flow_dump(" ipad: ", ctx->ipad, blocksize);
2408 | flow_dump(" opad: ", ctx->opad, blocksize);
2409 | } |
2410 | ctx->digestsize = digestsize; |
2411 | atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2412 | |
2413 | return 0; |
2414 | } |
2415 | |
2416 | static int ahash_hmac_init(struct ahash_request *req) |
2417 | { |
2418 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
2419 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
2420 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
2421 | unsigned int blocksize = |
2422 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2423 | 
2424 | flow_log("ahash_hmac_init()\n");
2425 | |
2426 | /* init the context as a hash */ |
2427 | ahash_init(req); |
2428 | |
2429 | if (!spu_no_incr_hash(ctx)) { |
2430 | /* SPU-M can do incr hashing but needs sw for outer HMAC */ |
2431 | rctx->is_sw_hmac = true; |
2432 | ctx->auth.mode = HASH_MODE_HASH; |
2433 | /* start with a prepended ipad */ |
2434 | memcpy(rctx->hash_carry, ctx->ipad, blocksize); |
2435 | rctx->hash_carry_len = blocksize; |
2436 | rctx->total_todo += blocksize; |
2437 | } |
2438 | |
2439 | return 0; |
2440 | } |
2441 | |
2442 | static int ahash_hmac_update(struct ahash_request *req) |
2443 | { |
2444 | flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2445 | |
2446 | if (!req->nbytes) |
2447 | return 0; |
2448 | |
2449 | return ahash_update(req); |
2450 | } |
2451 | |
2452 | static int ahash_hmac_final(struct ahash_request *req) |
2453 | { |
2454 | flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2455 | |
2456 | return ahash_final(req); |
2457 | } |
2458 | |
2459 | static int ahash_hmac_finup(struct ahash_request *req) |
2460 | { |
2461 | flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2462 | |
2463 | return ahash_finup(req); |
2464 | } |
2465 | |
2466 | static int ahash_hmac_digest(struct ahash_request *req) |
2467 | { |
2468 | struct iproc_reqctx_s *rctx = ahash_request_ctx(req); |
2469 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
2470 | struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); |
2471 | unsigned int blocksize = |
2472 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2473 | 
2474 | flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2475 | |
2476 | /* Perform initialization and then call finup */ |
2477 | __ahash_init(req); |
2478 | |
2479 | if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) { |
2480 | /*
2481 | * SPU2 supports a full HMAC implementation in hardware, so
2482 | * there is no need to generate the IPAD, OPAD, or outer
2483 | * hash in software.
2484 | * Only when the hash key is longer than the hash block size
2485 | * does SPU2 expect the key to be hashed, shortened to the
2486 | * digest size, and fed in as the hash key.
2487 | */
2488 | rctx->is_sw_hmac = false; |
2489 | ctx->auth.mode = HASH_MODE_HMAC; |
2490 | } else { |
2491 | rctx->is_sw_hmac = true; |
2492 | ctx->auth.mode = HASH_MODE_HASH; |
2493 | /* start with a prepended ipad */ |
2494 | memcpy(rctx->hash_carry, ctx->ipad, blocksize); |
2495 | rctx->hash_carry_len = blocksize; |
2496 | rctx->total_todo += blocksize; |
2497 | } |
2498 | |
2499 | return __ahash_finup(req); |
2500 | } |
2501 | |
2502 | /* aead helpers */ |
2503 | |
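/**
 * aead_need_fallback() - Check whether a request must use the sw fallback.
 * @req: Crypto API AEAD request
 *
 * Returns nonzero when the request has a shape that the SPU hardware
 * cannot process; the individual checks below document each limitation.
 */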
2504 | static int aead_need_fallback(struct aead_request *req) |
2505 | { |
2506 | struct iproc_reqctx_s *rctx = aead_request_ctx(req); |
2507 | struct spu_hw *spu = &iproc_priv.spu; |
2508 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2509 | struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2510 | u32 payload_len; |
2511 | |
2512 | /* |
2513 | * SPU hardware cannot handle the AES-GCM/CCM case where plaintext |
2514 | * and AAD are both 0 bytes long. So use fallback in this case. |
2515 | */ |
2516 | if (((ctx->cipher.mode == CIPHER_MODE_GCM) || |
2517 | (ctx->cipher.mode == CIPHER_MODE_CCM)) && |
2518 | (req->assoclen == 0)) { |
2519 | if ((rctx->is_encrypt && (req->cryptlen == 0)) || |
2520 | (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) { |
2521 | flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2522 | return 1; |
2523 | } |
2524 | } |
2525 | |
2526 | /* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */ |
2527 | if ((ctx->cipher.mode == CIPHER_MODE_CCM) && |
2528 | (spu->spu_type == SPU_TYPE_SPUM) && |
2529 | (ctx->digestsize != 8) && (ctx->digestsize != 12) && |
2530 | (ctx->digestsize != 16)) { |
2531 | flow_log("%s() AES CCM needs fallback for digest size %d\n",
2532 | __func__, ctx->digestsize); |
2533 | return 1; |
2534 | } |
2535 | |
2536 | /* |
2537 | * SPU-M on NSP has an issue where AES-CCM hash is not correct |
2538 | * when AAD size is 0 |
2539 | */ |
2540 | if ((ctx->cipher.mode == CIPHER_MODE_CCM) && |
2541 | (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) && |
2542 | (req->assoclen == 0)) { |
2543 | flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2544 | __func__); |
2545 | return 1; |
2546 | } |
2547 | |
2548 | /* |
2549 | * RFC4106 and RFC4543 cannot handle the case where AAD is other than |
2550 | * 16 or 20 bytes long. So use fallback in this case. |
2551 | */ |
2552 | if (ctx->cipher.mode == CIPHER_MODE_GCM && |
2553 | ctx->cipher.alg == CIPHER_ALG_AES && |
2554 | rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE && |
2555 | req->assoclen != 16 && req->assoclen != 20) { |
2556 | flow_log("RFC4106/RFC4543 needs fallback for assoclen"
2557 | " other than 16 or 20 bytes\n");
2558 | return 1; |
2559 | } |
2560 | |
2561 | payload_len = req->cryptlen; |
2562 | if (spu->spu_type == SPU_TYPE_SPUM) |
2563 | payload_len += req->assoclen; |
2564 | |
2565 | flow_log("%s() payload len: %u\n", __func__, payload_len);
2566 | |
2567 | if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) |
2568 | return 0; |
2569 | else |
2570 | return payload_len > ctx->max_payload; |
2571 | } |
2572 | |
2573 | static int aead_do_fallback(struct aead_request *req, bool is_encrypt) |
2574 | { |
2575 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2576 | struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2577 | struct iproc_reqctx_s *rctx = aead_request_ctx(req); |
2578 | struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); |
2579 | struct aead_request *subreq; |
2580 | |
2581 | flow_log("%s() enc:%u\n", __func__, is_encrypt);
2582 | |
2583 | if (!ctx->fallback_cipher) |
2584 | return -EINVAL; |
2585 | |
2586 | subreq = &rctx->req; |
2587 | aead_request_set_tfm(subreq, ctx->fallback_cipher);
2588 | aead_request_set_callback(subreq, aead_request_flags(req),
2589 | req->base.complete, req->base.data);
2590 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2591 | req->iv);
2592 | aead_request_set_ad(subreq, req->assoclen);
2593 | |
2594 | return is_encrypt ? crypto_aead_encrypt(subreq) :
2595 | crypto_aead_decrypt(subreq);
2596 | } |
2597 | |
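/**
 * aead_enqueue() - Common entry point for AEAD encrypt and decrypt.
 * @req:        Crypto API AEAD request
 * @is_encrypt: true if encrypting
 *
 * Locates the data portion of the src and dst scatterlists beyond the
 * associated data, sets up the IV (including any ESP salt), and either
 * diverts the request to the software fallback or submits the first
 * chunk to a SPU channel.
 *
 * Return: -EINPROGRESS if the request was accepted, else an error code
 */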
2598 | static int aead_enqueue(struct aead_request *req, bool is_encrypt) |
2599 | { |
2600 | struct iproc_reqctx_s *rctx = aead_request_ctx(req); |
2601 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2602 | struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2603 | int err; |
2604 | |
2605 | flow_log("%s() enc:%u\n", __func__, is_encrypt);
2606 | |
2607 | if (req->assoclen > MAX_ASSOC_SIZE) { |
2608 | pr_err
2609 | ("%s() Error: associated data too long. (%u > %u bytes)\n",
2610 | __func__, req->assoclen, MAX_ASSOC_SIZE);
2611 | return -EINVAL; |
2612 | } |
2613 | |
2614 | rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
2615 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
2616 | rctx->parent = &req->base; |
2617 | rctx->is_encrypt = is_encrypt; |
2618 | rctx->bd_suppress = false; |
2619 | rctx->total_todo = req->cryptlen; |
2620 | rctx->src_sent = 0; |
2621 | rctx->total_sent = 0; |
2622 | rctx->total_received = 0; |
2623 | rctx->is_sw_hmac = false; |
2624 | rctx->ctx = ctx; |
2625 | memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); |
2626 | |
2627 | /* assoc data is at start of src sg */ |
2628 | rctx->assoc = req->src; |
2629 | |
2630 | /* |
2631 | * Init current position in src scatterlist to be after assoc data. |
2632 | * src_skip set to buffer offset where data begins. (Assoc data could |
2633 | * end in the middle of a buffer.) |
2634 | */ |
2635 | if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2636 | &rctx->src_skip) < 0) {
2637 | pr_err("%s() Error: Unable to find start of src data\n",
2638 | __func__); |
2639 | return -EINVAL; |
2640 | } |
2641 | |
2642 | rctx->src_nents = 0; |
2643 | rctx->dst_nents = 0; |
2644 | if (req->dst == req->src) { |
2645 | rctx->dst_sg = rctx->src_sg; |
2646 | rctx->dst_skip = rctx->src_skip; |
2647 | } else { |
2648 | /* |
2649 | * Expect req->dst to have room for assoc data followed by |
2650 | * output data and ICV, if encrypt. So initialize dst_sg |
2651 | * to point beyond assoc len offset. |
2652 | */ |
2653 | if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2654 | &rctx->dst_skip) < 0) {
2655 | pr_err("%s() Error: Unable to find start of dst data\n",
2656 | __func__); |
2657 | return -EINVAL; |
2658 | } |
2659 | } |
2660 | |
2661 | if (ctx->cipher.mode == CIPHER_MODE_CBC || |
2662 | ctx->cipher.mode == CIPHER_MODE_CTR || |
2663 | ctx->cipher.mode == CIPHER_MODE_OFB || |
2664 | ctx->cipher.mode == CIPHER_MODE_XTS || |
2665 | ctx->cipher.mode == CIPHER_MODE_GCM) { |
2666 | rctx->iv_ctr_len = |
2667 | ctx->salt_len + |
2668 | crypto_aead_ivsize(crypto_aead_reqtfm(req));
2669 | } else if (ctx->cipher.mode == CIPHER_MODE_CCM) { |
2670 | rctx->iv_ctr_len = CCM_AES_IV_SIZE; |
2671 | } else { |
2672 | rctx->iv_ctr_len = 0; |
2673 | } |
2674 | |
2675 | rctx->hash_carry_len = 0; |
2676 | |
2677 | flow_log(" src sg: %p\n", req->src);
2678 | flow_log(" rctx->src_sg: %p, src_skip %u\n",
2679 | rctx->src_sg, rctx->src_skip);
2680 | flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2681 | flow_log(" dst sg: %p\n", req->dst);
2682 | flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2683 | rctx->dst_sg, rctx->dst_skip);
2684 | flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2685 | flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2686 | flow_log(" authkeylen:%u\n", ctx->authkeylen);
2687 | flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2688 | 
2689 | if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2690 | flow_log(" max_payload infinite");
2691 | else
2692 | flow_log(" max_payload: %u\n", ctx->max_payload);
2693 | |
2694 | if (unlikely(aead_need_fallback(req))) |
2695 | return aead_do_fallback(req, is_encrypt); |
2696 | |
2697 | /* |
2698 | * Do memory allocations for request after fallback check, because if we |
2699 | * do fallback, we won't call finish_req() to dealloc. |
2700 | */ |
2701 | if (rctx->iv_ctr_len) { |
2702 | if (ctx->salt_len) |
2703 | memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset, |
2704 | ctx->salt, ctx->salt_len); |
2705 | memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len, |
2706 | req->iv, |
2707 | rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset); |
2708 | } |
2709 | |
2710 | rctx->chan_idx = select_channel(); |
2711 | err = handle_aead_req(rctx); |
2712 | if (err != -EINPROGRESS) |
2713 | /* synchronous result */ |
2714 | spu_chunk_cleanup(rctx); |
2715 | |
2716 | return err; |
2717 | } |
2718 | |
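/**
 * aead_authenc_setkey() - setkey() for authenc() style AEAD algos.
 * @cipher: AEAD transform for this session
 * @key:    crypto API key blob containing both keys
 * @keylen: length of the blob, in bytes
 *
 * Splits the blob into its authentication and encryption keys using
 * crypto_authenc_extractkeys() and classifies the cipher by key length.
 *
 * Return: 0 on success, else -error
 */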
2719 | static int aead_authenc_setkey(struct crypto_aead *cipher, |
2720 | const u8 *key, unsigned int keylen) |
2721 | { |
2722 | struct spu_hw *spu = &iproc_priv.spu; |
2723 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2724 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2725 | struct crypto_authenc_keys keys; |
2726 | int ret; |
2727 | |
2728 | flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2729 | keylen);
2730 | flow_dump(" key: ", key, keylen);
2731 | |
2732 | ret = crypto_authenc_extractkeys(&keys, key, keylen);
2733 | if (ret) |
2734 | goto badkey; |
2735 | |
2736 | if (keys.enckeylen > MAX_KEY_SIZE || |
2737 | keys.authkeylen > MAX_KEY_SIZE) |
2738 | goto badkey; |
2739 | |
2740 | ctx->enckeylen = keys.enckeylen; |
2741 | ctx->authkeylen = keys.authkeylen; |
2742 | |
2743 | memcpy(ctx->enckey, keys.enckey, keys.enckeylen); |
2744 | /* May end up padding auth key. So make sure it's zeroed. */ |
2745 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); |
2746 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
2747 | |
2748 | switch (ctx->alg->cipher_info.alg) { |
2749 | case CIPHER_ALG_DES: |
2750 | if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2751 | return -EINVAL; |
2752 | |
2753 | ctx->cipher_type = CIPHER_TYPE_DES; |
2754 | break; |
2755 | case CIPHER_ALG_3DES: |
2756 | if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2757 | return -EINVAL; |
2758 | |
2759 | ctx->cipher_type = CIPHER_TYPE_3DES; |
2760 | break; |
2761 | case CIPHER_ALG_AES: |
2762 | switch (ctx->enckeylen) { |
2763 | case AES_KEYSIZE_128: |
2764 | ctx->cipher_type = CIPHER_TYPE_AES128; |
2765 | break; |
2766 | case AES_KEYSIZE_192: |
2767 | ctx->cipher_type = CIPHER_TYPE_AES192; |
2768 | break; |
2769 | case AES_KEYSIZE_256: |
2770 | ctx->cipher_type = CIPHER_TYPE_AES256; |
2771 | break; |
2772 | default: |
2773 | goto badkey; |
2774 | } |
2775 | break; |
2776 | default: |
2777 | pr_err("%s() Error: Unknown cipher alg\n", __func__);
2778 | return -EINVAL; |
2779 | } |
2780 | |
2781 | flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2782 | ctx->authkeylen);
2783 | flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2784 | flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2785 | |
2786 | /* setkey the fallback just in case we need to use it */
2787 | if (ctx->fallback_cipher) {
2788 | flow_log(" running fallback setkey()\n");
2789 | 
2790 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2791 | ctx->fallback_cipher->base.crt_flags |=
2792 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2793 | ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2794 | if (ret)
2795 | flow_log(" fallback setkey() returned:%d\n", ret);
2796 | } |
2797 | |
2798 | ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, |
2799 | ctx->enckeylen, |
2800 | false); |
2801 | |
2802 | atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2803 | |
2804 | return ret; |
2805 | |
2806 | badkey: |
2807 | ctx->enckeylen = 0; |
2808 | ctx->authkeylen = 0; |
2809 | ctx->digestsize = 0; |
2810 | |
2811 | return -EINVAL; |
2812 | } |
2813 | |
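/**
 * aead_gcm_ccm_setkey() - setkey() for GCM and CCM AEAD algos.
 * @cipher: AEAD transform for this session
 * @key:    AES key bytes
 * @keylen: length of key, in bytes
 *
 * GCM/CCM use a single AES key with no separate authentication key. The
 * ESP variants strip their trailing salt before calling in here.
 *
 * Return: 0 on success, else -error
 */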
2814 | static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, |
2815 | const u8 *key, unsigned int keylen) |
2816 | { |
2817 | struct spu_hw *spu = &iproc_priv.spu; |
2818 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2819 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2820 | |
2821 | int ret = 0; |
2822 | |
2823 | flow_log("%s() keylen:%u\n", __func__, keylen);
2824 | flow_dump(" key: ", key, keylen);
2825 | |
2826 | if (!ctx->is_esp) |
2827 | ctx->digestsize = keylen; |
2828 | |
2829 | ctx->enckeylen = keylen; |
2830 | ctx->authkeylen = 0; |
2831 | |
2832 | switch (ctx->enckeylen) { |
2833 | case AES_KEYSIZE_128: |
2834 | ctx->cipher_type = CIPHER_TYPE_AES128; |
2835 | break; |
2836 | case AES_KEYSIZE_192: |
2837 | ctx->cipher_type = CIPHER_TYPE_AES192; |
2838 | break; |
2839 | case AES_KEYSIZE_256: |
2840 | ctx->cipher_type = CIPHER_TYPE_AES256; |
2841 | break; |
2842 | default: |
2843 | goto badkey; |
2844 | } |
2845 | |
2846 | memcpy(ctx->enckey, key, ctx->enckeylen); |
2847 | |
2848 | flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2849 | ctx->authkeylen);
2850 | flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2851 | flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2852 | |
2853 | /* setkey the fallback just in case we need to use it */ |
2854 | if (ctx->fallback_cipher) { |
2855 | flow_log(" running fallback setkey()\n");
2856 | |
2857 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
2858 | ctx->fallback_cipher->base.crt_flags |= |
2859 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; |
2860 | ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2861 | keylen + ctx->salt_len);
2862 | if (ret)
2863 | flow_log(" fallback setkey() returned:%d\n", ret);
2864 | } |
2865 | |
2866 | ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, |
2867 | ctx->enckeylen, |
2868 | false); |
2869 | |
2870 | atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2871 | 
2872 | flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2873 | ctx->authkeylen);
2874 | |
2875 | return ret; |
2876 | |
2877 | badkey: |
2878 | ctx->enckeylen = 0; |
2879 | ctx->authkeylen = 0; |
2880 | ctx->digestsize = 0; |
2881 | |
2882 | return -EINVAL; |
2883 | } |
2884 | |
2885 | /** |
2886 | * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES. |
2887 | * @cipher: AEAD structure |
2888 | * @key: Key followed by 4 bytes of salt |
2889 | * @keylen: Length of key plus salt, in bytes |
2890 | * |
2891 | * Extracts salt from key and stores it to be prepended to IV on each request. |
2892 | * Digest is always 16 bytes |
2893 | * |
2894 | * Return: Value from generic gcm setkey. |
2895 | */ |
2896 | static int aead_gcm_esp_setkey(struct crypto_aead *cipher, |
2897 | const u8 *key, unsigned int keylen) |
2898 | { |
2899 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2900 | 
2901 | flow_log("%s\n", __func__);
2902 | |
2903 | if (keylen < GCM_ESP_SALT_SIZE) |
2904 | return -EINVAL; |
2905 | |
2906 | ctx->salt_len = GCM_ESP_SALT_SIZE; |
2907 | ctx->salt_offset = GCM_ESP_SALT_OFFSET; |
2908 | memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); |
2909 | keylen -= GCM_ESP_SALT_SIZE; |
2910 | ctx->digestsize = GCM_ESP_DIGESTSIZE; |
2911 | ctx->is_esp = true; |
2912 | flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2913 | |
2914 | return aead_gcm_ccm_setkey(cipher, key, keylen); |
2915 | } |
2916 | |
2917 | /** |
2918 | * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC. |
2919 | * @cipher: AEAD structure |
2920 | * @key: Key followed by 4 bytes of salt |
2921 | * @keylen: Length of key plus salt, in bytes |
2922 | * |
2923 | * Extracts salt from key and stores it to be prepended to IV on each request. |
2924 | * Digest is always 16 bytes |
2925 | * |
2926 | * Return: Value from generic gcm setkey. |
2927 | */ |
2928 | static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher, |
2929 | const u8 *key, unsigned int keylen) |
2930 | { |
2931 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2932 | 
2933 | flow_log("%s\n", __func__);
2934 | |
2935 | if (keylen < GCM_ESP_SALT_SIZE) |
2936 | return -EINVAL; |
2937 | |
2938 | ctx->salt_len = GCM_ESP_SALT_SIZE; |
2939 | ctx->salt_offset = GCM_ESP_SALT_OFFSET; |
2940 | memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); |
2941 | keylen -= GCM_ESP_SALT_SIZE; |
2942 | ctx->digestsize = GCM_ESP_DIGESTSIZE; |
2943 | ctx->is_esp = true; |
2944 | ctx->is_rfc4543 = true; |
2945 | flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2946 | |
2947 | return aead_gcm_ccm_setkey(cipher, key, keylen); |
2948 | } |
2949 | |
2950 | /** |
2951 | * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES. |
2952 | * @cipher: AEAD structure |
2953 | * @key: Key followed by 4 bytes of salt |
2954 | * @keylen: Length of key plus salt, in bytes |
2955 | * |
2956 | * Extracts salt from key and stores it to be prepended to IV on each request. |
2957 | * Digest is always 16 bytes |
2958 | * |
2959 | * Return: Value from generic ccm setkey. |
2960 | */ |
2961 | static int aead_ccm_esp_setkey(struct crypto_aead *cipher, |
2962 | const u8 *key, unsigned int keylen) |
2963 | { |
2964 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2965 | 
2966 | flow_log("%s\n", __func__);
2967 | |
2968 | if (keylen < CCM_ESP_SALT_SIZE) |
2969 | return -EINVAL; |
2970 | |
2971 | ctx->salt_len = CCM_ESP_SALT_SIZE; |
2972 | ctx->salt_offset = CCM_ESP_SALT_OFFSET; |
2973 | memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE); |
2974 | keylen -= CCM_ESP_SALT_SIZE; |
2975 | ctx->is_esp = true; |
2976 | flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
2977 | |
2978 | return aead_gcm_ccm_setkey(cipher, key, keylen); |
2979 | } |
2980 | |
2981 | static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize) |
2982 | { |
2983 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2984 | int ret = 0; |
2985 | |
2986 | flow_log("%s() authkeylen:%u authsize:%u\n",
2987 | __func__, ctx->authkeylen, authsize); |
2988 | |
2989 | ctx->digestsize = authsize; |
2990 | |
2991 | /* setkey the fallback just in case we need to use it */
2992 | if (ctx->fallback_cipher) {
2993 | flow_log(" running fallback setauth()\n");
2994 | 
2995 | ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
2996 | if (ret)
2997 | flow_log(" fallback setauth() returned:%d\n", ret);
2998 | } |
2999 | |
3000 | return ret; |
3001 | } |
3002 | |
3003 | static int aead_encrypt(struct aead_request *req) |
3004 | { |
3005 | flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3006 | req->cryptlen);
3007 | dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3008 | flow_log(" assoc_len:%u\n", req->assoclen);
3009 | |
3010 | return aead_enqueue(req, is_encrypt: true); |
3011 | } |
3012 | |
3013 | static int aead_decrypt(struct aead_request *req) |
3014 | { |
3015 | flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3016 | dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3017 | flow_log(" assoc_len:%u\n", req->assoclen);
3018 | |
3019 | return aead_enqueue(req, is_encrypt: false); |
3020 | } |
3021 | |
3022 | /* ==================== Supported Cipher Algorithms ==================== */ |
3023 | |
3024 | static struct iproc_alg_s driver_algs[] = { |
3025 | { |
3026 | .type = CRYPTO_ALG_TYPE_AEAD, |
3027 | .alg.aead = { |
3028 | .base = { |
3029 | .cra_name = "gcm(aes)" , |
3030 | .cra_driver_name = "gcm-aes-iproc" , |
3031 | .cra_blocksize = AES_BLOCK_SIZE, |
3032 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3033 | }, |
3034 | .setkey = aead_gcm_ccm_setkey, |
3035 | .ivsize = GCM_AES_IV_SIZE, |
3036 | .maxauthsize = AES_BLOCK_SIZE, |
3037 | }, |
3038 | .cipher_info = { |
3039 | .alg = CIPHER_ALG_AES, |
3040 | .mode = CIPHER_MODE_GCM, |
3041 | }, |
3042 | .auth_info = { |
3043 | .alg = HASH_ALG_AES, |
3044 | .mode = HASH_MODE_GCM, |
3045 | }, |
3046 | .auth_first = 0, |
3047 | }, |
3048 | { |
3049 | .type = CRYPTO_ALG_TYPE_AEAD, |
3050 | .alg.aead = { |
3051 | .base = { |
3052 | .cra_name = "ccm(aes)" , |
3053 | .cra_driver_name = "ccm-aes-iproc" , |
3054 | .cra_blocksize = AES_BLOCK_SIZE, |
3055 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3056 | }, |
3057 | .setkey = aead_gcm_ccm_setkey, |
3058 | .ivsize = CCM_AES_IV_SIZE, |
3059 | .maxauthsize = AES_BLOCK_SIZE, |
3060 | }, |
3061 | .cipher_info = { |
3062 | .alg = CIPHER_ALG_AES, |
3063 | .mode = CIPHER_MODE_CCM, |
3064 | }, |
3065 | .auth_info = { |
3066 | .alg = HASH_ALG_AES, |
3067 | .mode = HASH_MODE_CCM, |
3068 | }, |
3069 | .auth_first = 0, |
3070 | }, |
3071 | { |
3072 | .type = CRYPTO_ALG_TYPE_AEAD, |
3073 | .alg.aead = { |
3074 | .base = { |
3075 | .cra_name = "rfc4106(gcm(aes))" , |
3076 | .cra_driver_name = "gcm-aes-esp-iproc" , |
3077 | .cra_blocksize = AES_BLOCK_SIZE, |
3078 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3079 | }, |
3080 | .setkey = aead_gcm_esp_setkey, |
3081 | .ivsize = GCM_RFC4106_IV_SIZE, |
3082 | .maxauthsize = AES_BLOCK_SIZE, |
3083 | }, |
3084 | .cipher_info = { |
3085 | .alg = CIPHER_ALG_AES, |
3086 | .mode = CIPHER_MODE_GCM, |
3087 | }, |
3088 | .auth_info = { |
3089 | .alg = HASH_ALG_AES, |
3090 | .mode = HASH_MODE_GCM, |
3091 | }, |
3092 | .auth_first = 0, |
3093 | }, |
3094 | { |
3095 | .type = CRYPTO_ALG_TYPE_AEAD, |
3096 | .alg.aead = { |
3097 | .base = { |
3098 | .cra_name = "rfc4309(ccm(aes))" , |
3099 | .cra_driver_name = "ccm-aes-esp-iproc" , |
3100 | .cra_blocksize = AES_BLOCK_SIZE, |
3101 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3102 | }, |
3103 | .setkey = aead_ccm_esp_setkey, |
3104 | .ivsize = CCM_AES_IV_SIZE, |
3105 | .maxauthsize = AES_BLOCK_SIZE, |
3106 | }, |
3107 | .cipher_info = { |
3108 | .alg = CIPHER_ALG_AES, |
3109 | .mode = CIPHER_MODE_CCM, |
3110 | }, |
3111 | .auth_info = { |
3112 | .alg = HASH_ALG_AES, |
3113 | .mode = HASH_MODE_CCM, |
3114 | }, |
3115 | .auth_first = 0, |
3116 | }, |
3117 | { |
3118 | .type = CRYPTO_ALG_TYPE_AEAD, |
3119 | .alg.aead = { |
3120 | .base = { |
3121 | .cra_name = "rfc4543(gcm(aes))" , |
3122 | .cra_driver_name = "gmac-aes-esp-iproc" , |
3123 | .cra_blocksize = AES_BLOCK_SIZE, |
3124 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3125 | }, |
3126 | .setkey = rfc4543_gcm_esp_setkey, |
3127 | .ivsize = GCM_RFC4106_IV_SIZE, |
3128 | .maxauthsize = AES_BLOCK_SIZE, |
3129 | }, |
3130 | .cipher_info = { |
3131 | .alg = CIPHER_ALG_AES, |
3132 | .mode = CIPHER_MODE_GCM, |
3133 | }, |
3134 | .auth_info = { |
3135 | .alg = HASH_ALG_AES, |
3136 | .mode = HASH_MODE_GCM, |
3137 | }, |
3138 | .auth_first = 0, |
3139 | }, |
3140 | { |
3141 | .type = CRYPTO_ALG_TYPE_AEAD, |
3142 | .alg.aead = { |
3143 | .base = { |
3144 | .cra_name = "authenc(hmac(md5),cbc(aes))" , |
3145 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc" , |
3146 | .cra_blocksize = AES_BLOCK_SIZE, |
3147 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3148 | CRYPTO_ALG_ASYNC | |
3149 | CRYPTO_ALG_ALLOCATES_MEMORY |
3150 | }, |
3151 | .setkey = aead_authenc_setkey, |
3152 | .ivsize = AES_BLOCK_SIZE, |
3153 | .maxauthsize = MD5_DIGEST_SIZE, |
3154 | }, |
3155 | .cipher_info = { |
3156 | .alg = CIPHER_ALG_AES, |
3157 | .mode = CIPHER_MODE_CBC, |
3158 | }, |
3159 | .auth_info = { |
3160 | .alg = HASH_ALG_MD5, |
3161 | .mode = HASH_MODE_HMAC, |
3162 | }, |
3163 | .auth_first = 0, |
3164 | }, |
3165 | { |
3166 | .type = CRYPTO_ALG_TYPE_AEAD, |
3167 | .alg.aead = { |
3168 | .base = { |
3169 | .cra_name = "authenc(hmac(sha1),cbc(aes))" , |
3170 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc" , |
3171 | .cra_blocksize = AES_BLOCK_SIZE, |
3172 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3173 | CRYPTO_ALG_ASYNC | |
3174 | CRYPTO_ALG_ALLOCATES_MEMORY |
3175 | }, |
3176 | .setkey = aead_authenc_setkey, |
3177 | .ivsize = AES_BLOCK_SIZE, |
3178 | .maxauthsize = SHA1_DIGEST_SIZE, |
3179 | }, |
3180 | .cipher_info = { |
3181 | .alg = CIPHER_ALG_AES, |
3182 | .mode = CIPHER_MODE_CBC, |
3183 | }, |
3184 | .auth_info = { |
3185 | .alg = HASH_ALG_SHA1, |
3186 | .mode = HASH_MODE_HMAC, |
3187 | }, |
3188 | .auth_first = 0, |
3189 | }, |
3190 | { |
3191 | .type = CRYPTO_ALG_TYPE_AEAD, |
3192 | .alg.aead = { |
3193 | .base = { |
3194 | .cra_name = "authenc(hmac(sha256),cbc(aes))" , |
3195 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc" , |
3196 | .cra_blocksize = AES_BLOCK_SIZE, |
3197 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3198 | CRYPTO_ALG_ASYNC | |
3199 | CRYPTO_ALG_ALLOCATES_MEMORY |
3200 | }, |
3201 | .setkey = aead_authenc_setkey, |
3202 | .ivsize = AES_BLOCK_SIZE, |
3203 | .maxauthsize = SHA256_DIGEST_SIZE, |
3204 | }, |
3205 | .cipher_info = { |
3206 | .alg = CIPHER_ALG_AES, |
3207 | .mode = CIPHER_MODE_CBC, |
3208 | }, |
3209 | .auth_info = { |
3210 | .alg = HASH_ALG_SHA256, |
3211 | .mode = HASH_MODE_HMAC, |
3212 | }, |
3213 | .auth_first = 0, |
3214 | }, |
3215 | { |
3216 | .type = CRYPTO_ALG_TYPE_AEAD, |
3217 | .alg.aead = { |
3218 | .base = { |
3219 | .cra_name = "authenc(hmac(md5),cbc(des))" , |
3220 | .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc" , |
3221 | .cra_blocksize = DES_BLOCK_SIZE, |
3222 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3223 | CRYPTO_ALG_ASYNC | |
3224 | CRYPTO_ALG_ALLOCATES_MEMORY |
3225 | }, |
3226 | .setkey = aead_authenc_setkey, |
3227 | .ivsize = DES_BLOCK_SIZE, |
3228 | .maxauthsize = MD5_DIGEST_SIZE, |
3229 | }, |
3230 | .cipher_info = { |
3231 | .alg = CIPHER_ALG_DES, |
3232 | .mode = CIPHER_MODE_CBC, |
3233 | }, |
3234 | .auth_info = { |
3235 | .alg = HASH_ALG_MD5, |
3236 | .mode = HASH_MODE_HMAC, |
3237 | }, |
3238 | .auth_first = 0, |
3239 | }, |
3240 | { |
3241 | .type = CRYPTO_ALG_TYPE_AEAD, |
3242 | .alg.aead = { |
3243 | .base = { |
3244 | .cra_name = "authenc(hmac(sha1),cbc(des))" , |
3245 | .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc" , |
3246 | .cra_blocksize = DES_BLOCK_SIZE, |
3247 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3248 | CRYPTO_ALG_ASYNC | |
3249 | CRYPTO_ALG_ALLOCATES_MEMORY |
3250 | }, |
3251 | .setkey = aead_authenc_setkey, |
3252 | .ivsize = DES_BLOCK_SIZE, |
3253 | .maxauthsize = SHA1_DIGEST_SIZE, |
3254 | }, |
3255 | .cipher_info = { |
3256 | .alg = CIPHER_ALG_DES, |
3257 | .mode = CIPHER_MODE_CBC, |
3258 | }, |
3259 | .auth_info = { |
3260 | .alg = HASH_ALG_SHA1, |
3261 | .mode = HASH_MODE_HMAC, |
3262 | }, |
3263 | .auth_first = 0, |
3264 | }, |
3265 | { |
3266 | .type = CRYPTO_ALG_TYPE_AEAD, |
3267 | .alg.aead = { |
3268 | .base = { |
3269 | .cra_name = "authenc(hmac(sha224),cbc(des))" , |
3270 | .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc" , |
3271 | .cra_blocksize = DES_BLOCK_SIZE, |
3272 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3273 | CRYPTO_ALG_ASYNC | |
3274 | CRYPTO_ALG_ALLOCATES_MEMORY |
3275 | }, |
3276 | .setkey = aead_authenc_setkey, |
3277 | .ivsize = DES_BLOCK_SIZE, |
3278 | .maxauthsize = SHA224_DIGEST_SIZE, |
3279 | }, |
3280 | .cipher_info = { |
3281 | .alg = CIPHER_ALG_DES, |
3282 | .mode = CIPHER_MODE_CBC, |
3283 | }, |
3284 | .auth_info = { |
3285 | .alg = HASH_ALG_SHA224, |
3286 | .mode = HASH_MODE_HMAC, |
3287 | }, |
3288 | .auth_first = 0, |
3289 | }, |
3290 | { |
3291 | .type = CRYPTO_ALG_TYPE_AEAD, |
3292 | .alg.aead = { |
3293 | .base = { |
3294 | .cra_name = "authenc(hmac(sha256),cbc(des))" , |
3295 | .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc" , |
3296 | .cra_blocksize = DES_BLOCK_SIZE, |
3297 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3298 | CRYPTO_ALG_ASYNC | |
3299 | CRYPTO_ALG_ALLOCATES_MEMORY |
3300 | }, |
3301 | .setkey = aead_authenc_setkey, |
3302 | .ivsize = DES_BLOCK_SIZE, |
3303 | .maxauthsize = SHA256_DIGEST_SIZE, |
3304 | }, |
3305 | .cipher_info = { |
3306 | .alg = CIPHER_ALG_DES, |
3307 | .mode = CIPHER_MODE_CBC, |
3308 | }, |
3309 | .auth_info = { |
3310 | .alg = HASH_ALG_SHA256, |
3311 | .mode = HASH_MODE_HMAC, |
3312 | }, |
3313 | .auth_first = 0, |
3314 | }, |
3315 | { |
3316 | .type = CRYPTO_ALG_TYPE_AEAD, |
3317 | .alg.aead = { |
3318 | .base = { |
3319 | .cra_name = "authenc(hmac(sha384),cbc(des))" , |
3320 | .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc" , |
3321 | .cra_blocksize = DES_BLOCK_SIZE, |
3322 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3323 | CRYPTO_ALG_ASYNC | |
3324 | CRYPTO_ALG_ALLOCATES_MEMORY |
3325 | }, |
3326 | .setkey = aead_authenc_setkey, |
3327 | .ivsize = DES_BLOCK_SIZE, |
3328 | .maxauthsize = SHA384_DIGEST_SIZE, |
3329 | }, |
3330 | .cipher_info = { |
3331 | .alg = CIPHER_ALG_DES, |
3332 | .mode = CIPHER_MODE_CBC, |
3333 | }, |
3334 | .auth_info = { |
3335 | .alg = HASH_ALG_SHA384, |
3336 | .mode = HASH_MODE_HMAC, |
3337 | }, |
3338 | .auth_first = 0, |
3339 | }, |
3340 | { |
3341 | .type = CRYPTO_ALG_TYPE_AEAD, |
3342 | .alg.aead = { |
3343 | .base = { |
3344 | .cra_name = "authenc(hmac(sha512),cbc(des))" , |
3345 | .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc" , |
3346 | .cra_blocksize = DES_BLOCK_SIZE, |
3347 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3348 | CRYPTO_ALG_ASYNC | |
3349 | CRYPTO_ALG_ALLOCATES_MEMORY |
3350 | }, |
3351 | .setkey = aead_authenc_setkey, |
3352 | .ivsize = DES_BLOCK_SIZE, |
3353 | .maxauthsize = SHA512_DIGEST_SIZE, |
3354 | }, |
3355 | .cipher_info = { |
3356 | .alg = CIPHER_ALG_DES, |
3357 | .mode = CIPHER_MODE_CBC, |
3358 | }, |
3359 | .auth_info = { |
3360 | .alg = HASH_ALG_SHA512, |
3361 | .mode = HASH_MODE_HMAC, |
3362 | }, |
3363 | .auth_first = 0, |
3364 | }, |
3365 | { |
3366 | .type = CRYPTO_ALG_TYPE_AEAD, |
3367 | .alg.aead = { |
3368 | .base = { |
3369 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))" , |
3370 | .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc" , |
3371 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3372 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3373 | CRYPTO_ALG_ASYNC | |
3374 | CRYPTO_ALG_ALLOCATES_MEMORY |
3375 | }, |
3376 | .setkey = aead_authenc_setkey, |
3377 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3378 | .maxauthsize = MD5_DIGEST_SIZE, |
3379 | }, |
3380 | .cipher_info = { |
3381 | .alg = CIPHER_ALG_3DES, |
3382 | .mode = CIPHER_MODE_CBC, |
3383 | }, |
3384 | .auth_info = { |
3385 | .alg = HASH_ALG_MD5, |
3386 | .mode = HASH_MODE_HMAC, |
3387 | }, |
3388 | .auth_first = 0, |
3389 | }, |
3390 | { |
3391 | .type = CRYPTO_ALG_TYPE_AEAD, |
3392 | .alg.aead = { |
3393 | .base = { |
3394 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))" , |
3395 | .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc" , |
3396 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3397 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3398 | CRYPTO_ALG_ASYNC | |
3399 | CRYPTO_ALG_ALLOCATES_MEMORY |
3400 | }, |
3401 | .setkey = aead_authenc_setkey, |
3402 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3403 | .maxauthsize = SHA1_DIGEST_SIZE, |
3404 | }, |
3405 | .cipher_info = { |
3406 | .alg = CIPHER_ALG_3DES, |
3407 | .mode = CIPHER_MODE_CBC, |
3408 | }, |
3409 | .auth_info = { |
3410 | .alg = HASH_ALG_SHA1, |
3411 | .mode = HASH_MODE_HMAC, |
3412 | }, |
3413 | .auth_first = 0, |
3414 | }, |
3415 | { |
3416 | .type = CRYPTO_ALG_TYPE_AEAD, |
3417 | .alg.aead = { |
3418 | .base = { |
3419 | .cra_name = "authenc(hmac(sha224),cbc(des3_ede))" , |
3420 | .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc" , |
3421 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3422 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3423 | CRYPTO_ALG_ASYNC | |
3424 | CRYPTO_ALG_ALLOCATES_MEMORY |
3425 | }, |
3426 | .setkey = aead_authenc_setkey, |
3427 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3428 | .maxauthsize = SHA224_DIGEST_SIZE, |
3429 | }, |
3430 | .cipher_info = { |
3431 | .alg = CIPHER_ALG_3DES, |
3432 | .mode = CIPHER_MODE_CBC, |
3433 | }, |
3434 | .auth_info = { |
3435 | .alg = HASH_ALG_SHA224, |
3436 | .mode = HASH_MODE_HMAC, |
3437 | }, |
3438 | .auth_first = 0, |
3439 | }, |
3440 | { |
3441 | .type = CRYPTO_ALG_TYPE_AEAD, |
3442 | .alg.aead = { |
3443 | .base = { |
3444 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))" , |
3445 | .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc" , |
3446 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3447 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3448 | CRYPTO_ALG_ASYNC | |
3449 | CRYPTO_ALG_ALLOCATES_MEMORY |
3450 | }, |
3451 | .setkey = aead_authenc_setkey, |
3452 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3453 | .maxauthsize = SHA256_DIGEST_SIZE, |
3454 | }, |
3455 | .cipher_info = { |
3456 | .alg = CIPHER_ALG_3DES, |
3457 | .mode = CIPHER_MODE_CBC, |
3458 | }, |
3459 | .auth_info = { |
3460 | .alg = HASH_ALG_SHA256, |
3461 | .mode = HASH_MODE_HMAC, |
3462 | }, |
3463 | .auth_first = 0, |
3464 | }, |
3465 | { |
3466 | .type = CRYPTO_ALG_TYPE_AEAD, |
3467 | .alg.aead = { |
3468 | .base = { |
3469 | .cra_name = "authenc(hmac(sha384),cbc(des3_ede))" , |
3470 | .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc" , |
3471 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3472 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3473 | CRYPTO_ALG_ASYNC | |
3474 | CRYPTO_ALG_ALLOCATES_MEMORY |
3475 | }, |
3476 | .setkey = aead_authenc_setkey, |
3477 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3478 | .maxauthsize = SHA384_DIGEST_SIZE, |
3479 | }, |
3480 | .cipher_info = { |
3481 | .alg = CIPHER_ALG_3DES, |
3482 | .mode = CIPHER_MODE_CBC, |
3483 | }, |
3484 | .auth_info = { |
3485 | .alg = HASH_ALG_SHA384, |
3486 | .mode = HASH_MODE_HMAC, |
3487 | }, |
3488 | .auth_first = 0, |
3489 | }, |
3490 | { |
3491 | .type = CRYPTO_ALG_TYPE_AEAD, |
3492 | .alg.aead = { |
3493 | .base = { |
3494 | .cra_name = "authenc(hmac(sha512),cbc(des3_ede))" , |
3495 | .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc" , |
3496 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3497 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
3498 | CRYPTO_ALG_ASYNC | |
3499 | CRYPTO_ALG_ALLOCATES_MEMORY |
3500 | }, |
3501 | .setkey = aead_authenc_setkey, |
3502 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3503 | .maxauthsize = SHA512_DIGEST_SIZE, |
3504 | }, |
3505 | .cipher_info = { |
3506 | .alg = CIPHER_ALG_3DES, |
3507 | .mode = CIPHER_MODE_CBC, |
3508 | }, |
3509 | .auth_info = { |
3510 | .alg = HASH_ALG_SHA512, |
3511 | .mode = HASH_MODE_HMAC, |
3512 | }, |
3513 | .auth_first = 0, |
3514 | }, |
3515 | |
3516 | /* SKCIPHER algorithms. */ |
3517 | { |
3518 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3519 | .alg.skcipher = { |
3520 | .base.cra_name = "cbc(des)" , |
3521 | .base.cra_driver_name = "cbc-des-iproc" , |
3522 | .base.cra_blocksize = DES_BLOCK_SIZE, |
3523 | .min_keysize = DES_KEY_SIZE, |
3524 | .max_keysize = DES_KEY_SIZE, |
3525 | .ivsize = DES_BLOCK_SIZE, |
3526 | }, |
3527 | .cipher_info = { |
3528 | .alg = CIPHER_ALG_DES, |
3529 | .mode = CIPHER_MODE_CBC, |
3530 | }, |
3531 | .auth_info = { |
3532 | .alg = HASH_ALG_NONE, |
3533 | .mode = HASH_MODE_NONE, |
3534 | }, |
3535 | }, |
3536 | { |
3537 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3538 | .alg.skcipher = { |
3539 | .base.cra_name = "ecb(des)" , |
3540 | .base.cra_driver_name = "ecb-des-iproc" , |
3541 | .base.cra_blocksize = DES_BLOCK_SIZE, |
3542 | .min_keysize = DES_KEY_SIZE, |
3543 | .max_keysize = DES_KEY_SIZE, |
3544 | .ivsize = 0, |
3545 | }, |
3546 | .cipher_info = { |
3547 | .alg = CIPHER_ALG_DES, |
3548 | .mode = CIPHER_MODE_ECB, |
3549 | }, |
3550 | .auth_info = { |
3551 | .alg = HASH_ALG_NONE, |
3552 | .mode = HASH_MODE_NONE, |
3553 | }, |
3554 | }, |
3555 | { |
3556 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3557 | .alg.skcipher = { |
3558 | .base.cra_name = "cbc(des3_ede)" , |
3559 | .base.cra_driver_name = "cbc-des3-iproc" , |
3560 | .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3561 | .min_keysize = DES3_EDE_KEY_SIZE, |
3562 | .max_keysize = DES3_EDE_KEY_SIZE, |
3563 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3564 | }, |
3565 | .cipher_info = { |
3566 | .alg = CIPHER_ALG_3DES, |
3567 | .mode = CIPHER_MODE_CBC, |
3568 | }, |
3569 | .auth_info = { |
3570 | .alg = HASH_ALG_NONE, |
3571 | .mode = HASH_MODE_NONE, |
3572 | }, |
3573 | }, |
3574 | { |
3575 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3576 | .alg.skcipher = { |
3577 | .base.cra_name = "ecb(des3_ede)" , |
3578 | .base.cra_driver_name = "ecb-des3-iproc" , |
3579 | .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, |
3580 | .min_keysize = DES3_EDE_KEY_SIZE, |
3581 | .max_keysize = DES3_EDE_KEY_SIZE, |
3582 | .ivsize = 0, |
3583 | }, |
3584 | .cipher_info = { |
3585 | .alg = CIPHER_ALG_3DES, |
3586 | .mode = CIPHER_MODE_ECB, |
3587 | }, |
3588 | .auth_info = { |
3589 | .alg = HASH_ALG_NONE, |
3590 | .mode = HASH_MODE_NONE, |
3591 | }, |
3592 | }, |
3593 | { |
3594 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3595 | .alg.skcipher = { |
3596 | .base.cra_name = "cbc(aes)" , |
3597 | .base.cra_driver_name = "cbc-aes-iproc" , |
3598 | .base.cra_blocksize = AES_BLOCK_SIZE, |
3599 | .min_keysize = AES_MIN_KEY_SIZE, |
3600 | .max_keysize = AES_MAX_KEY_SIZE, |
3601 | .ivsize = AES_BLOCK_SIZE, |
3602 | }, |
3603 | .cipher_info = { |
3604 | .alg = CIPHER_ALG_AES, |
3605 | .mode = CIPHER_MODE_CBC, |
3606 | }, |
3607 | .auth_info = { |
3608 | .alg = HASH_ALG_NONE, |
3609 | .mode = HASH_MODE_NONE, |
3610 | }, |
3611 | }, |
3612 | { |
3613 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3614 | .alg.skcipher = { |
3615 | .base.cra_name = "ecb(aes)" , |
3616 | .base.cra_driver_name = "ecb-aes-iproc" , |
3617 | .base.cra_blocksize = AES_BLOCK_SIZE, |
3618 | .min_keysize = AES_MIN_KEY_SIZE, |
3619 | .max_keysize = AES_MAX_KEY_SIZE, |
3620 | .ivsize = 0, |
3621 | }, |
3622 | .cipher_info = { |
3623 | .alg = CIPHER_ALG_AES, |
3624 | .mode = CIPHER_MODE_ECB, |
3625 | }, |
3626 | .auth_info = { |
3627 | .alg = HASH_ALG_NONE, |
3628 | .mode = HASH_MODE_NONE, |
3629 | }, |
3630 | }, |
3631 | { |
3632 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3633 | .alg.skcipher = { |
3634 | .base.cra_name = "ctr(aes)" , |
3635 | .base.cra_driver_name = "ctr-aes-iproc" , |
3636 | .base.cra_blocksize = AES_BLOCK_SIZE, |
3637 | .min_keysize = AES_MIN_KEY_SIZE, |
3638 | .max_keysize = AES_MAX_KEY_SIZE, |
3639 | .ivsize = AES_BLOCK_SIZE, |
3640 | }, |
3641 | .cipher_info = { |
3642 | .alg = CIPHER_ALG_AES, |
3643 | .mode = CIPHER_MODE_CTR, |
3644 | }, |
3645 | .auth_info = { |
3646 | .alg = HASH_ALG_NONE, |
3647 | .mode = HASH_MODE_NONE, |
3648 | }, |
3649 | }, |
3650 | { |
3651 | .type = CRYPTO_ALG_TYPE_SKCIPHER, |
3652 | .alg.skcipher = { |
3653 | .base.cra_name = "xts(aes)" , |
3654 | .base.cra_driver_name = "xts-aes-iproc" , |
3655 | .base.cra_blocksize = AES_BLOCK_SIZE, |
3656 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
3657 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
3658 | .ivsize = AES_BLOCK_SIZE, |
3659 | }, |
3660 | .cipher_info = { |
3661 | .alg = CIPHER_ALG_AES, |
3662 | .mode = CIPHER_MODE_XTS, |
3663 | }, |
3664 | .auth_info = { |
3665 | .alg = HASH_ALG_NONE, |
3666 | .mode = HASH_MODE_NONE, |
3667 | }, |
3668 | }, |
3669 | |
3670 | /* AHASH algorithms. */ |
3671 | { |
3672 | .type = CRYPTO_ALG_TYPE_AHASH, |
3673 | .alg.hash = { |
3674 | .halg.digestsize = MD5_DIGEST_SIZE, |
3675 | .halg.base = { |
3676 | .cra_name = "md5" , |
3677 | .cra_driver_name = "md5-iproc" , |
3678 | .cra_blocksize = MD5_BLOCK_WORDS * 4, |
3679 | .cra_flags = CRYPTO_ALG_ASYNC | |
3680 | CRYPTO_ALG_ALLOCATES_MEMORY, |
3681 | } |
3682 | }, |
3683 | .cipher_info = { |
3684 | .alg = CIPHER_ALG_NONE, |
3685 | .mode = CIPHER_MODE_NONE, |
3686 | }, |
3687 | .auth_info = { |
3688 | .alg = HASH_ALG_MD5, |
3689 | .mode = HASH_MODE_HASH, |
3690 | }, |
3691 | }, |
3692 | { |
3693 | .type = CRYPTO_ALG_TYPE_AHASH, |
3694 | .alg.hash = { |
3695 | .halg.digestsize = MD5_DIGEST_SIZE, |
3696 | .halg.base = { |
3697 | .cra_name = "hmac(md5)" , |
3698 | .cra_driver_name = "hmac-md5-iproc" , |
3699 | .cra_blocksize = MD5_BLOCK_WORDS * 4, |
3700 | } |
3701 | }, |
3702 | .cipher_info = { |
3703 | .alg = CIPHER_ALG_NONE, |
3704 | .mode = CIPHER_MODE_NONE, |
3705 | }, |
3706 | .auth_info = { |
3707 | .alg = HASH_ALG_MD5, |
3708 | .mode = HASH_MODE_HMAC, |
3709 | }, |
3710 | }, |
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3712 | .alg.hash = { |
3713 | .halg.digestsize = SHA1_DIGEST_SIZE, |
3714 | .halg.base = { |
3715 | .cra_name = "sha1" , |
3716 | .cra_driver_name = "sha1-iproc" , |
3717 | .cra_blocksize = SHA1_BLOCK_SIZE, |
3718 | } |
3719 | }, |
3720 | .cipher_info = { |
3721 | .alg = CIPHER_ALG_NONE, |
3722 | .mode = CIPHER_MODE_NONE, |
3723 | }, |
3724 | .auth_info = { |
3725 | .alg = HASH_ALG_SHA1, |
3726 | .mode = HASH_MODE_HASH, |
3727 | }, |
3728 | }, |
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3730 | .alg.hash = { |
3731 | .halg.digestsize = SHA1_DIGEST_SIZE, |
3732 | .halg.base = { |
3733 | .cra_name = "hmac(sha1)" , |
3734 | .cra_driver_name = "hmac-sha1-iproc" , |
3735 | .cra_blocksize = SHA1_BLOCK_SIZE, |
3736 | } |
3737 | }, |
3738 | .cipher_info = { |
3739 | .alg = CIPHER_ALG_NONE, |
3740 | .mode = CIPHER_MODE_NONE, |
3741 | }, |
3742 | .auth_info = { |
3743 | .alg = HASH_ALG_SHA1, |
3744 | .mode = HASH_MODE_HMAC, |
3745 | }, |
3746 | }, |
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3748 | .alg.hash = { |
3749 | .halg.digestsize = SHA224_DIGEST_SIZE, |
3750 | .halg.base = { |
3751 | .cra_name = "sha224" , |
3752 | .cra_driver_name = "sha224-iproc" , |
3753 | .cra_blocksize = SHA224_BLOCK_SIZE, |
3754 | } |
3755 | }, |
3756 | .cipher_info = { |
3757 | .alg = CIPHER_ALG_NONE, |
3758 | .mode = CIPHER_MODE_NONE, |
3759 | }, |
3760 | .auth_info = { |
3761 | .alg = HASH_ALG_SHA224, |
3762 | .mode = HASH_MODE_HASH, |
3763 | }, |
3764 | }, |
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3766 | .alg.hash = { |
3767 | .halg.digestsize = SHA224_DIGEST_SIZE, |
3768 | .halg.base = { |
3769 | .cra_name = "hmac(sha224)" , |
3770 | .cra_driver_name = "hmac-sha224-iproc" , |
3771 | .cra_blocksize = SHA224_BLOCK_SIZE, |
3772 | } |
3773 | }, |
3774 | .cipher_info = { |
3775 | .alg = CIPHER_ALG_NONE, |
3776 | .mode = CIPHER_MODE_NONE, |
3777 | }, |
3778 | .auth_info = { |
3779 | .alg = HASH_ALG_SHA224, |
3780 | .mode = HASH_MODE_HMAC, |
3781 | }, |
3782 | }, |
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3784 | .alg.hash = { |
3785 | .halg.digestsize = SHA256_DIGEST_SIZE, |
3786 | .halg.base = { |
3787 | .cra_name = "sha256" , |
3788 | .cra_driver_name = "sha256-iproc" , |
3789 | .cra_blocksize = SHA256_BLOCK_SIZE, |
3790 | } |
3791 | }, |
3792 | .cipher_info = { |
3793 | .alg = CIPHER_ALG_NONE, |
3794 | .mode = CIPHER_MODE_NONE, |
3795 | }, |
3796 | .auth_info = { |
3797 | .alg = HASH_ALG_SHA256, |
3798 | .mode = HASH_MODE_HASH, |
3799 | }, |
3800 | }, |
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
3802 | .alg.hash = { |
3803 | .halg.digestsize = SHA256_DIGEST_SIZE, |
3804 | .halg.base = { |
3805 | .cra_name = "hmac(sha256)" , |
3806 | .cra_driver_name = "hmac-sha256-iproc" , |
3807 | .cra_blocksize = SHA256_BLOCK_SIZE, |
3808 | } |
3809 | }, |
3810 | .cipher_info = { |
3811 | .alg = CIPHER_ALG_NONE, |
3812 | .mode = CIPHER_MODE_NONE, |
3813 | }, |
3814 | .auth_info = { |
3815 | .alg = HASH_ALG_SHA256, |
3816 | .mode = HASH_MODE_HMAC, |
3817 | }, |
3818 | }, |
3819 | { |
3820 | .type = CRYPTO_ALG_TYPE_AHASH, |
3821 | .alg.hash = { |
3822 | .halg.digestsize = SHA384_DIGEST_SIZE, |
3823 | .halg.base = { |
3824 | .cra_name = "sha384" , |
3825 | .cra_driver_name = "sha384-iproc" , |
3826 | .cra_blocksize = SHA384_BLOCK_SIZE, |
3827 | } |
3828 | }, |
3829 | .cipher_info = { |
3830 | .alg = CIPHER_ALG_NONE, |
3831 | .mode = CIPHER_MODE_NONE, |
3832 | }, |
3833 | .auth_info = { |
3834 | .alg = HASH_ALG_SHA384, |
3835 | .mode = HASH_MODE_HASH, |
3836 | }, |
3837 | }, |
3838 | { |
3839 | .type = CRYPTO_ALG_TYPE_AHASH, |
3840 | .alg.hash = { |
3841 | .halg.digestsize = SHA384_DIGEST_SIZE, |
3842 | .halg.base = { |
3843 | .cra_name = "hmac(sha384)" , |
3844 | .cra_driver_name = "hmac-sha384-iproc" , |
3845 | .cra_blocksize = SHA384_BLOCK_SIZE, |
3846 | } |
3847 | }, |
3848 | .cipher_info = { |
3849 | .alg = CIPHER_ALG_NONE, |
3850 | .mode = CIPHER_MODE_NONE, |
3851 | }, |
3852 | .auth_info = { |
3853 | .alg = HASH_ALG_SHA384, |
3854 | .mode = HASH_MODE_HMAC, |
3855 | }, |
3856 | }, |
3857 | { |
3858 | .type = CRYPTO_ALG_TYPE_AHASH, |
3859 | .alg.hash = { |
3860 | .halg.digestsize = SHA512_DIGEST_SIZE, |
3861 | .halg.base = { |
3862 | .cra_name = "sha512" , |
3863 | .cra_driver_name = "sha512-iproc" , |
3864 | .cra_blocksize = SHA512_BLOCK_SIZE, |
3865 | } |
3866 | }, |
3867 | .cipher_info = { |
3868 | .alg = CIPHER_ALG_NONE, |
3869 | .mode = CIPHER_MODE_NONE, |
3870 | }, |
3871 | .auth_info = { |
3872 | .alg = HASH_ALG_SHA512, |
3873 | .mode = HASH_MODE_HASH, |
3874 | }, |
3875 | }, |
3876 | { |
3877 | .type = CRYPTO_ALG_TYPE_AHASH, |
3878 | .alg.hash = { |
3879 | .halg.digestsize = SHA512_DIGEST_SIZE, |
3880 | .halg.base = { |
3881 | .cra_name = "hmac(sha512)" , |
3882 | .cra_driver_name = "hmac-sha512-iproc" , |
3883 | .cra_blocksize = SHA512_BLOCK_SIZE, |
3884 | } |
3885 | }, |
3886 | .cipher_info = { |
3887 | .alg = CIPHER_ALG_NONE, |
3888 | .mode = CIPHER_MODE_NONE, |
3889 | }, |
3890 | .auth_info = { |
3891 | .alg = HASH_ALG_SHA512, |
3892 | .mode = HASH_MODE_HMAC, |
3893 | }, |
3894 | }, |
3895 | { |
3896 | .type = CRYPTO_ALG_TYPE_AHASH, |
3897 | .alg.hash = { |
3898 | .halg.digestsize = SHA3_224_DIGEST_SIZE, |
3899 | .halg.base = { |
3900 | .cra_name = "sha3-224" , |
3901 | .cra_driver_name = "sha3-224-iproc" , |
3902 | .cra_blocksize = SHA3_224_BLOCK_SIZE, |
3903 | } |
3904 | }, |
3905 | .cipher_info = { |
3906 | .alg = CIPHER_ALG_NONE, |
3907 | .mode = CIPHER_MODE_NONE, |
3908 | }, |
3909 | .auth_info = { |
3910 | .alg = HASH_ALG_SHA3_224, |
3911 | .mode = HASH_MODE_HASH, |
3912 | }, |
3913 | }, |
3914 | { |
3915 | .type = CRYPTO_ALG_TYPE_AHASH, |
3916 | .alg.hash = { |
3917 | .halg.digestsize = SHA3_224_DIGEST_SIZE, |
3918 | .halg.base = { |
3919 | .cra_name = "hmac(sha3-224)" , |
3920 | .cra_driver_name = "hmac-sha3-224-iproc" , |
3921 | .cra_blocksize = SHA3_224_BLOCK_SIZE, |
3922 | } |
3923 | }, |
3924 | .cipher_info = { |
3925 | .alg = CIPHER_ALG_NONE, |
3926 | .mode = CIPHER_MODE_NONE, |
3927 | }, |
3928 | .auth_info = { |
3929 | .alg = HASH_ALG_SHA3_224, |
			.mode = HASH_MODE_HMAC,
3931 | }, |
3932 | }, |
3933 | { |
3934 | .type = CRYPTO_ALG_TYPE_AHASH, |
3935 | .alg.hash = { |
3936 | .halg.digestsize = SHA3_256_DIGEST_SIZE, |
3937 | .halg.base = { |
3938 | .cra_name = "sha3-256" , |
3939 | .cra_driver_name = "sha3-256-iproc" , |
3940 | .cra_blocksize = SHA3_256_BLOCK_SIZE, |
3941 | } |
3942 | }, |
3943 | .cipher_info = { |
3944 | .alg = CIPHER_ALG_NONE, |
3945 | .mode = CIPHER_MODE_NONE, |
3946 | }, |
3947 | .auth_info = { |
3948 | .alg = HASH_ALG_SHA3_256, |
3949 | .mode = HASH_MODE_HASH, |
3950 | }, |
3951 | }, |
3952 | { |
3953 | .type = CRYPTO_ALG_TYPE_AHASH, |
3954 | .alg.hash = { |
3955 | .halg.digestsize = SHA3_256_DIGEST_SIZE, |
3956 | .halg.base = { |
3957 | .cra_name = "hmac(sha3-256)" , |
3958 | .cra_driver_name = "hmac-sha3-256-iproc" , |
3959 | .cra_blocksize = SHA3_256_BLOCK_SIZE, |
3960 | } |
3961 | }, |
3962 | .cipher_info = { |
3963 | .alg = CIPHER_ALG_NONE, |
3964 | .mode = CIPHER_MODE_NONE, |
3965 | }, |
3966 | .auth_info = { |
3967 | .alg = HASH_ALG_SHA3_256, |
3968 | .mode = HASH_MODE_HMAC, |
3969 | }, |
3970 | }, |
3971 | { |
3972 | .type = CRYPTO_ALG_TYPE_AHASH, |
3973 | .alg.hash = { |
3974 | .halg.digestsize = SHA3_384_DIGEST_SIZE, |
3975 | .halg.base = { |
3976 | .cra_name = "sha3-384" , |
3977 | .cra_driver_name = "sha3-384-iproc" , |
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
3979 | } |
3980 | }, |
3981 | .cipher_info = { |
3982 | .alg = CIPHER_ALG_NONE, |
3983 | .mode = CIPHER_MODE_NONE, |
3984 | }, |
3985 | .auth_info = { |
3986 | .alg = HASH_ALG_SHA3_384, |
3987 | .mode = HASH_MODE_HASH, |
3988 | }, |
3989 | }, |
3990 | { |
3991 | .type = CRYPTO_ALG_TYPE_AHASH, |
3992 | .alg.hash = { |
3993 | .halg.digestsize = SHA3_384_DIGEST_SIZE, |
3994 | .halg.base = { |
3995 | .cra_name = "hmac(sha3-384)" , |
3996 | .cra_driver_name = "hmac-sha3-384-iproc" , |
3997 | .cra_blocksize = SHA3_384_BLOCK_SIZE, |
3998 | } |
3999 | }, |
4000 | .cipher_info = { |
4001 | .alg = CIPHER_ALG_NONE, |
4002 | .mode = CIPHER_MODE_NONE, |
4003 | }, |
4004 | .auth_info = { |
4005 | .alg = HASH_ALG_SHA3_384, |
4006 | .mode = HASH_MODE_HMAC, |
4007 | }, |
4008 | }, |
4009 | { |
4010 | .type = CRYPTO_ALG_TYPE_AHASH, |
4011 | .alg.hash = { |
4012 | .halg.digestsize = SHA3_512_DIGEST_SIZE, |
4013 | .halg.base = { |
4014 | .cra_name = "sha3-512" , |
4015 | .cra_driver_name = "sha3-512-iproc" , |
4016 | .cra_blocksize = SHA3_512_BLOCK_SIZE, |
4017 | } |
4018 | }, |
4019 | .cipher_info = { |
4020 | .alg = CIPHER_ALG_NONE, |
4021 | .mode = CIPHER_MODE_NONE, |
4022 | }, |
4023 | .auth_info = { |
4024 | .alg = HASH_ALG_SHA3_512, |
4025 | .mode = HASH_MODE_HASH, |
4026 | }, |
4027 | }, |
4028 | { |
4029 | .type = CRYPTO_ALG_TYPE_AHASH, |
4030 | .alg.hash = { |
4031 | .halg.digestsize = SHA3_512_DIGEST_SIZE, |
4032 | .halg.base = { |
4033 | .cra_name = "hmac(sha3-512)" , |
4034 | .cra_driver_name = "hmac-sha3-512-iproc" , |
4035 | .cra_blocksize = SHA3_512_BLOCK_SIZE, |
4036 | } |
4037 | }, |
4038 | .cipher_info = { |
4039 | .alg = CIPHER_ALG_NONE, |
4040 | .mode = CIPHER_MODE_NONE, |
4041 | }, |
4042 | .auth_info = { |
4043 | .alg = HASH_ALG_SHA3_512, |
4044 | .mode = HASH_MODE_HMAC, |
4045 | }, |
4046 | }, |
4047 | { |
4048 | .type = CRYPTO_ALG_TYPE_AHASH, |
4049 | .alg.hash = { |
4050 | .halg.digestsize = AES_BLOCK_SIZE, |
4051 | .halg.base = { |
4052 | .cra_name = "xcbc(aes)" , |
4053 | .cra_driver_name = "xcbc-aes-iproc" , |
4054 | .cra_blocksize = AES_BLOCK_SIZE, |
4055 | } |
4056 | }, |
4057 | .cipher_info = { |
4058 | .alg = CIPHER_ALG_NONE, |
4059 | .mode = CIPHER_MODE_NONE, |
4060 | }, |
4061 | .auth_info = { |
4062 | .alg = HASH_ALG_AES, |
4063 | .mode = HASH_MODE_XCBC, |
4064 | }, |
4065 | }, |
4066 | { |
4067 | .type = CRYPTO_ALG_TYPE_AHASH, |
4068 | .alg.hash = { |
4069 | .halg.digestsize = AES_BLOCK_SIZE, |
4070 | .halg.base = { |
4071 | .cra_name = "cmac(aes)" , |
4072 | .cra_driver_name = "cmac-aes-iproc" , |
4073 | .cra_blocksize = AES_BLOCK_SIZE, |
4074 | } |
4075 | }, |
4076 | .cipher_info = { |
4077 | .alg = CIPHER_ALG_NONE, |
4078 | .mode = CIPHER_MODE_NONE, |
4079 | }, |
4080 | .auth_info = { |
4081 | .alg = HASH_ALG_AES, |
4082 | .mode = HASH_MODE_CMAC, |
4083 | }, |
4084 | }, |
4085 | }; |
4086 | |
4087 | static int generic_cra_init(struct crypto_tfm *tfm, |
4088 | struct iproc_alg_s *cipher_alg) |
4089 | { |
4090 | struct spu_hw *spu = &iproc_priv.spu; |
4091 | struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); |
4092 | unsigned int blocksize = crypto_tfm_alg_blocksize(tfm); |
4093 | |
	flow_log("%s()\n", __func__);
4095 | |
4096 | ctx->alg = cipher_alg; |
4097 | ctx->cipher = cipher_alg->cipher_info; |
4098 | ctx->auth = cipher_alg->auth_info; |
4099 | ctx->auth_first = cipher_alg->auth_first; |
4100 | ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg, |
4101 | ctx->cipher.mode, |
4102 | blocksize); |
4103 | ctx->fallback_cipher = NULL; |
4104 | |
4105 | ctx->enckeylen = 0; |
4106 | ctx->authkeylen = 0; |
4107 | |
	atomic_inc(&iproc_priv.stream_count);
	atomic_inc(&iproc_priv.session_count);
4110 | |
4111 | return 0; |
4112 | } |
4113 | |
4114 | static int skcipher_init_tfm(struct crypto_skcipher *skcipher) |
4115 | { |
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
	struct iproc_alg_s *cipher_alg;

	flow_log("%s()\n", __func__);

	crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
4123 | |
4124 | cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher); |
4125 | return generic_cra_init(tfm, cipher_alg); |
4126 | } |
4127 | |
4128 | static int ahash_cra_init(struct crypto_tfm *tfm) |
4129 | { |
4130 | int err; |
4131 | struct crypto_alg *alg = tfm->__crt_alg; |
4132 | struct iproc_alg_s *cipher_alg; |
4133 | |
4134 | cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s, |
4135 | alg.hash); |
4136 | |
4137 | err = generic_cra_init(tfm, cipher_alg); |
	flow_log("%s()\n", __func__);
4139 | |
4140 | /* |
4141 | * export state size has to be < 512 bytes. So don't include msg bufs |
4142 | * in state size. |
4143 | */ |
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct iproc_reqctx_s));
4146 | |
4147 | return err; |
4148 | } |
4149 | |
4150 | static int aead_cra_init(struct crypto_aead *aead) |
4151 | { |
4152 | unsigned int reqsize = sizeof(struct iproc_reqctx_s); |
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4154 | struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); |
4155 | struct crypto_alg *alg = tfm->__crt_alg; |
4156 | struct aead_alg *aalg = container_of(alg, struct aead_alg, base); |
4157 | struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s, |
4158 | alg.aead); |
4159 | |
4160 | int err = generic_cra_init(tfm, cipher_alg); |
4161 | |
	flow_log("%s()\n", __func__);
4163 | |
4164 | ctx->is_esp = false; |
4165 | ctx->salt_len = 0; |
4166 | ctx->salt_offset = 0; |
4167 | |
4168 | /* random first IV */ |
	get_random_bytes(ctx->iv, MAX_IV_SIZE);
	flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
4171 | |
4172 | if (err) |
4173 | goto out; |
4174 | |
4175 | if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK)) |
4176 | goto reqsize; |
4177 | |
	flow_log("%s() creating fallback cipher\n", __func__);

	ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
						 CRYPTO_ALG_ASYNC |
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_cipher)) {
		pr_err("%s() Error: failed to allocate fallback for %s\n",
		       __func__, alg->cra_name);
		return PTR_ERR(ctx->fallback_cipher);
	}

	reqsize += crypto_aead_reqsize(ctx->fallback_cipher);
4190 | |
4191 | reqsize: |
4192 | crypto_aead_set_reqsize(aead, reqsize); |
4193 | |
4194 | out: |
4195 | return err; |
4196 | } |
4197 | |
4198 | static void generic_cra_exit(struct crypto_tfm *tfm) |
4199 | { |
	atomic_dec(&iproc_priv.session_count);
4201 | } |
4202 | |
4203 | static void skcipher_exit_tfm(struct crypto_skcipher *tfm) |
4204 | { |
	generic_cra_exit(crypto_skcipher_tfm(tfm));
4206 | } |
4207 | |
4208 | static void aead_cra_exit(struct crypto_aead *aead) |
4209 | { |
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4211 | struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); |
4212 | |
4213 | generic_cra_exit(tfm); |
4214 | |
4215 | if (ctx->fallback_cipher) { |
		crypto_free_aead(ctx->fallback_cipher);
4217 | ctx->fallback_cipher = NULL; |
4218 | } |
4219 | } |
4220 | |
4221 | /** |
4222 | * spu_functions_register() - Specify hardware-specific SPU functions based on |
4223 | * SPU type read from device tree. |
4224 | * @dev: device structure |
4225 | * @spu_type: SPU hardware generation |
4226 | * @spu_subtype: SPU hardware version |
4227 | */ |
4228 | static void spu_functions_register(struct device *dev, |
4229 | enum spu_spu_type spu_type, |
4230 | enum spu_spu_subtype spu_subtype) |
4231 | { |
4232 | struct spu_hw *spu = &iproc_priv.spu; |
4233 | |
4234 | if (spu_type == SPU_TYPE_SPUM) { |
4235 | dev_dbg(dev, "Registering SPUM functions" ); |
4236 | spu->spu_dump_msg_hdr = spum_dump_msg_hdr; |
4237 | spu->spu_payload_length = spum_payload_length; |
4238 | spu->spu_response_hdr_len = spum_response_hdr_len; |
4239 | spu->spu_hash_pad_len = spum_hash_pad_len; |
4240 | spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len; |
4241 | spu->spu_assoc_resp_len = spum_assoc_resp_len; |
4242 | spu->spu_aead_ivlen = spum_aead_ivlen; |
4243 | spu->spu_hash_type = spum_hash_type; |
4244 | spu->spu_digest_size = spum_digest_size; |
4245 | spu->spu_create_request = spum_create_request; |
4246 | spu->spu_cipher_req_init = spum_cipher_req_init; |
4247 | spu->spu_cipher_req_finish = spum_cipher_req_finish; |
4248 | spu->spu_request_pad = spum_request_pad; |
4249 | spu->spu_tx_status_len = spum_tx_status_len; |
4250 | spu->spu_rx_status_len = spum_rx_status_len; |
4251 | spu->spu_status_process = spum_status_process; |
4252 | spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload; |
4253 | spu->spu_ccm_update_iv = spum_ccm_update_iv; |
4254 | spu->spu_wordalign_padlen = spum_wordalign_padlen; |
4255 | if (spu_subtype == SPU_SUBTYPE_SPUM_NS2) |
4256 | spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload; |
4257 | else |
4258 | spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload; |
4259 | } else { |
4260 | dev_dbg(dev, "Registering SPU2 functions" ); |
4261 | spu->spu_dump_msg_hdr = spu2_dump_msg_hdr; |
4262 | spu->spu_ctx_max_payload = spu2_ctx_max_payload; |
4263 | spu->spu_payload_length = spu2_payload_length; |
4264 | spu->spu_response_hdr_len = spu2_response_hdr_len; |
4265 | spu->spu_hash_pad_len = spu2_hash_pad_len; |
4266 | spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len; |
4267 | spu->spu_assoc_resp_len = spu2_assoc_resp_len; |
4268 | spu->spu_aead_ivlen = spu2_aead_ivlen; |
4269 | spu->spu_hash_type = spu2_hash_type; |
4270 | spu->spu_digest_size = spu2_digest_size; |
4271 | spu->spu_create_request = spu2_create_request; |
4272 | spu->spu_cipher_req_init = spu2_cipher_req_init; |
4273 | spu->spu_cipher_req_finish = spu2_cipher_req_finish; |
4274 | spu->spu_request_pad = spu2_request_pad; |
4275 | spu->spu_tx_status_len = spu2_tx_status_len; |
4276 | spu->spu_rx_status_len = spu2_rx_status_len; |
4277 | spu->spu_status_process = spu2_status_process; |
4278 | spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload; |
4279 | spu->spu_ccm_update_iv = spu2_ccm_update_iv; |
4280 | spu->spu_wordalign_padlen = spu2_wordalign_padlen; |
4281 | } |
4282 | } |
4283 | |
4284 | /** |
4285 | * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox |
4286 | * channel for the SPU being probed. |
4287 | * @dev: SPU driver device structure |
4288 | * |
4289 | * Return: 0 if successful |
4290 | * < 0 otherwise |
4291 | */ |
4292 | static int spu_mb_init(struct device *dev) |
4293 | { |
4294 | struct mbox_client *mcl = &iproc_priv.mcl; |
4295 | int err, i; |
4296 | |
	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
				       sizeof(struct mbox_chan *), GFP_KERNEL);
4299 | if (!iproc_priv.mbox) |
4300 | return -ENOMEM; |
4301 | |
4302 | mcl->dev = dev; |
4303 | mcl->tx_block = false; |
4304 | mcl->tx_tout = 0; |
4305 | mcl->knows_txdone = true; |
4306 | mcl->rx_callback = spu_rx_callback; |
4307 | mcl->tx_done = NULL; |
4308 | |
4309 | for (i = 0; i < iproc_priv.spu.num_chan; i++) { |
		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
		if (IS_ERR(iproc_priv.mbox[i])) {
			err = PTR_ERR(iproc_priv.mbox[i]);
			dev_err(dev,
				"Mbox channel %d request failed with err %d",
				i, err);
4316 | iproc_priv.mbox[i] = NULL; |
4317 | goto free_channels; |
4318 | } |
4319 | } |
4320 | |
4321 | return 0; |
4322 | free_channels: |
4323 | for (i = 0; i < iproc_priv.spu.num_chan; i++) { |
4324 | if (iproc_priv.mbox[i]) |
			mbox_free_channel(iproc_priv.mbox[i]);
4326 | } |
4327 | |
4328 | return err; |
4329 | } |
4330 | |
4331 | static void spu_mb_release(struct platform_device *pdev) |
4332 | { |
4333 | int i; |
4334 | |
4335 | for (i = 0; i < iproc_priv.spu.num_chan; i++) |
		mbox_free_channel(iproc_priv.mbox[i]);
4337 | } |
4338 | |
4339 | static void spu_counters_init(void) |
4340 | { |
4341 | int i; |
4342 | int j; |
4343 | |
	atomic_set(&iproc_priv.session_count, 0);
	atomic_set(&iproc_priv.stream_count, 0);
	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
	atomic64_set(&iproc_priv.bytes_in, 0);
	atomic64_set(&iproc_priv.bytes_out, 0);
	for (i = 0; i < SPU_OP_NUM; i++) {
		atomic_set(&iproc_priv.op_counts[i], 0);
		atomic_set(&iproc_priv.setkey_cnt[i], 0);
	}
	for (i = 0; i < CIPHER_ALG_LAST; i++)
		for (j = 0; j < CIPHER_MODE_LAST; j++)
			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);

	for (i = 0; i < HASH_ALG_LAST; i++) {
		atomic_set(&iproc_priv.hash_cnt[i], 0);
		atomic_set(&iproc_priv.hmac_cnt[i], 0);
	}
	for (i = 0; i < AEAD_TYPE_LAST; i++)
		atomic_set(&iproc_priv.aead_cnt[i], 0);

	atomic_set(&iproc_priv.mb_no_spc, 0);
	atomic_set(&iproc_priv.mb_send_fail, 0);
	atomic_set(&iproc_priv.bad_icv, 0);
4367 | } |
4368 | |
4369 | static int spu_register_skcipher(struct iproc_alg_s *driver_alg) |
4370 | { |
4371 | struct skcipher_alg *crypto = &driver_alg->alg.skcipher; |
4372 | int err; |
4373 | |
4374 | crypto->base.cra_module = THIS_MODULE; |
4375 | crypto->base.cra_priority = cipher_pri; |
4376 | crypto->base.cra_alignmask = 0; |
4377 | crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s); |
4378 | crypto->base.cra_flags = CRYPTO_ALG_ASYNC | |
4379 | CRYPTO_ALG_ALLOCATES_MEMORY | |
4380 | CRYPTO_ALG_KERN_DRIVER_ONLY; |
4381 | |
4382 | crypto->init = skcipher_init_tfm; |
4383 | crypto->exit = skcipher_exit_tfm; |
4384 | crypto->setkey = skcipher_setkey; |
4385 | crypto->encrypt = skcipher_encrypt; |
4386 | crypto->decrypt = skcipher_decrypt; |
4387 | |
	err = crypto_register_skcipher(crypto);
4389 | /* Mark alg as having been registered, if successful */ |
4390 | if (err == 0) |
4391 | driver_alg->registered = true; |
4392 | pr_debug(" registered skcipher %s\n" , crypto->base.cra_driver_name); |
4393 | return err; |
4394 | } |
4395 | |
4396 | static int spu_register_ahash(struct iproc_alg_s *driver_alg) |
4397 | { |
4398 | struct spu_hw *spu = &iproc_priv.spu; |
4399 | struct ahash_alg *hash = &driver_alg->alg.hash; |
4400 | int err; |
4401 | |
4402 | /* AES-XCBC is the only AES hash type currently supported on SPU-M */ |
4403 | if ((driver_alg->auth_info.alg == HASH_ALG_AES) && |
4404 | (driver_alg->auth_info.mode != HASH_MODE_XCBC) && |
4405 | (spu->spu_type == SPU_TYPE_SPUM)) |
4406 | return 0; |
4407 | |
	/* SHA3 algorithm variants are only supported by SPU2 version 2,
	 * so skip registering them on SPU-M and SPU2 version 1.
	 */
4409 | if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) && |
4410 | (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2)) |
4411 | return 0; |
4412 | |
4413 | hash->halg.base.cra_module = THIS_MODULE; |
4414 | hash->halg.base.cra_priority = hash_pri; |
4415 | hash->halg.base.cra_alignmask = 0; |
4416 | hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s); |
4417 | hash->halg.base.cra_init = ahash_cra_init; |
4418 | hash->halg.base.cra_exit = generic_cra_exit; |
4419 | hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC | |
4420 | CRYPTO_ALG_ALLOCATES_MEMORY; |
4421 | hash->halg.statesize = sizeof(struct spu_hash_export_s); |
4422 | |
4423 | if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { |
4424 | hash->init = ahash_init; |
4425 | hash->update = ahash_update; |
4426 | hash->final = ahash_final; |
4427 | hash->finup = ahash_finup; |
4428 | hash->digest = ahash_digest; |
4429 | if ((driver_alg->auth_info.alg == HASH_ALG_AES) && |
4430 | ((driver_alg->auth_info.mode == HASH_MODE_XCBC) || |
4431 | (driver_alg->auth_info.mode == HASH_MODE_CMAC))) { |
4432 | hash->setkey = ahash_setkey; |
4433 | } |
4434 | } else { |
4435 | hash->setkey = ahash_hmac_setkey; |
4436 | hash->init = ahash_hmac_init; |
4437 | hash->update = ahash_hmac_update; |
4438 | hash->final = ahash_hmac_final; |
4439 | hash->finup = ahash_hmac_finup; |
4440 | hash->digest = ahash_hmac_digest; |
4441 | } |
4442 | hash->export = ahash_export; |
4443 | hash->import = ahash_import; |
4444 | |
	err = crypto_register_ahash(hash);
4446 | /* Mark alg as having been registered, if successful */ |
4447 | if (err == 0) |
4448 | driver_alg->registered = true; |
4449 | pr_debug(" registered ahash %s\n" , |
4450 | hash->halg.base.cra_driver_name); |
4451 | return err; |
4452 | } |
4453 | |
4454 | static int spu_register_aead(struct iproc_alg_s *driver_alg) |
4455 | { |
4456 | struct aead_alg *aead = &driver_alg->alg.aead; |
4457 | int err; |
4458 | |
4459 | aead->base.cra_module = THIS_MODULE; |
4460 | aead->base.cra_priority = aead_pri; |
4461 | aead->base.cra_alignmask = 0; |
4462 | aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); |
4463 | |
4464 | aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; |
	/* setkey is set per-algorithm in the driver_algs table */
4466 | aead->setauthsize = aead_setauthsize; |
4467 | aead->encrypt = aead_encrypt; |
4468 | aead->decrypt = aead_decrypt; |
4469 | aead->init = aead_cra_init; |
4470 | aead->exit = aead_cra_exit; |
4471 | |
	err = crypto_register_aead(aead);
4473 | /* Mark alg as having been registered, if successful */ |
4474 | if (err == 0) |
4475 | driver_alg->registered = true; |
4476 | pr_debug(" registered aead %s\n" , aead->base.cra_driver_name); |
4477 | return err; |
4478 | } |
4479 | |
4480 | /* register crypto algorithms the device supports */ |
4481 | static int spu_algs_register(struct device *dev) |
4482 | { |
4483 | int i, j; |
4484 | int err; |
4485 | |
4486 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
4487 | switch (driver_algs[i].type) { |
4488 | case CRYPTO_ALG_TYPE_SKCIPHER: |
			err = spu_register_skcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
4496 | break; |
4497 | default: |
4498 | dev_err(dev, |
4499 | "iproc-crypto: unknown alg type: %d" , |
4500 | driver_algs[i].type); |
4501 | err = -EINVAL; |
4502 | } |
4503 | |
4504 | if (err) { |
4505 | dev_err(dev, "alg registration failed with error %d\n" , |
4506 | err); |
4507 | goto err_algs; |
4508 | } |
4509 | } |
4510 | |
4511 | return 0; |
4512 | |
4513 | err_algs: |
4514 | for (j = 0; j < i; j++) { |
4515 | /* Skip any algorithm not registered */ |
4516 | if (!driver_algs[j].registered) |
4517 | continue; |
4518 | switch (driver_algs[j].type) { |
4519 | case CRYPTO_ALG_TYPE_SKCIPHER: |
			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
4529 | driver_algs[j].registered = false; |
4530 | break; |
4531 | } |
4532 | } |
4533 | return err; |
4534 | } |
4535 | |
4536 | /* ==================== Kernel Platform API ==================== */ |
4537 | |
4538 | static struct spu_type_subtype spum_ns2_types = { |
4539 | SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2 |
4540 | }; |
4541 | |
4542 | static struct spu_type_subtype spum_nsp_types = { |
4543 | SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP |
4544 | }; |
4545 | |
4546 | static struct spu_type_subtype spu2_types = { |
4547 | SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1 |
4548 | }; |
4549 | |
4550 | static struct spu_type_subtype spu2_v2_types = { |
4551 | SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2 |
4552 | }; |
4553 | |
4554 | static const struct of_device_id bcm_spu_dt_ids[] = { |
4555 | { |
4556 | .compatible = "brcm,spum-crypto" , |
4557 | .data = &spum_ns2_types, |
4558 | }, |
4559 | { |
4560 | .compatible = "brcm,spum-nsp-crypto" , |
4561 | .data = &spum_nsp_types, |
4562 | }, |
4563 | { |
4564 | .compatible = "brcm,spu2-crypto" , |
4565 | .data = &spu2_types, |
4566 | }, |
4567 | { |
4568 | .compatible = "brcm,spu2-v2-crypto" , |
4569 | .data = &spu2_v2_types, |
4570 | }, |
4571 | { /* sentinel */ } |
4572 | }; |
4573 | |
4574 | MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids); |
4575 | |
4576 | static int spu_dt_read(struct platform_device *pdev) |
4577 | { |
4578 | struct device *dev = &pdev->dev; |
4579 | struct spu_hw *spu = &iproc_priv.spu; |
4580 | struct resource *spu_ctrl_regs; |
4581 | const struct spu_type_subtype *matched_spu_type; |
4582 | struct device_node *dn = pdev->dev.of_node; |
4583 | int err, i; |
4584 | |
4585 | /* Count number of mailbox channels */ |
	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
4587 | |
4588 | matched_spu_type = of_device_get_match_data(dev); |
4589 | if (!matched_spu_type) { |
4590 | dev_err(dev, "Failed to match device\n" ); |
4591 | return -ENODEV; |
4592 | } |
4593 | |
4594 | spu->spu_type = matched_spu_type->type; |
4595 | spu->spu_subtype = matched_spu_type->subtype; |
4596 | |
4597 | for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = |
4598 | platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { |
4599 | |
		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(dev, "Failed to map registers: %d\n",
				err);
4605 | spu->reg_vbase[i] = NULL; |
4606 | return err; |
4607 | } |
4608 | } |
4609 | spu->num_spu = i; |
4610 | dev_dbg(dev, "Device has %d SPUs" , spu->num_spu); |
4611 | |
4612 | return 0; |
4613 | } |
4614 | |
4615 | static int bcm_spu_probe(struct platform_device *pdev) |
4616 | { |
4617 | struct device *dev = &pdev->dev; |
4618 | struct spu_hw *spu = &iproc_priv.spu; |
4619 | int err; |
4620 | |
4621 | iproc_priv.pdev = pdev; |
	platform_set_drvdata(iproc_priv.pdev, &iproc_priv);
4624 | |
4625 | err = spu_dt_read(pdev); |
4626 | if (err < 0) |
4627 | goto failure; |
4628 | |
4629 | err = spu_mb_init(dev); |
4630 | if (err < 0) |
4631 | goto failure; |
4632 | |
4633 | if (spu->spu_type == SPU_TYPE_SPUM) |
4634 | iproc_priv.bcm_hdr_len = 8; |
4635 | else if (spu->spu_type == SPU_TYPE_SPU2) |
4636 | iproc_priv.bcm_hdr_len = 0; |
4637 | |
	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
4639 | |
4640 | spu_counters_init(); |
4641 | |
4642 | spu_setup_debugfs(); |
4643 | |
4644 | err = spu_algs_register(dev); |
4645 | if (err < 0) |
4646 | goto fail_reg; |
4647 | |
4648 | return 0; |
4649 | |
4650 | fail_reg: |
4651 | spu_free_debugfs(); |
4652 | failure: |
4653 | spu_mb_release(pdev); |
4654 | dev_err(dev, "%s failed with error %d.\n" , __func__, err); |
4655 | |
4656 | return err; |
4657 | } |
4658 | |
4659 | static void bcm_spu_remove(struct platform_device *pdev) |
4660 | { |
4661 | int i; |
4662 | struct device *dev = &pdev->dev; |
4663 | char *cdn; |
4664 | |
4665 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
4666 | /* |
4667 | * Not all algorithms were registered, depending on whether |
4668 | * hardware is SPU or SPU2. So here we make sure to skip |
4669 | * those algorithms that were not previously registered. |
4670 | */ |
4671 | if (!driver_algs[i].registered) |
4672 | continue; |
4673 | |
4674 | switch (driver_algs[i].type) { |
4675 | case CRYPTO_ALG_TYPE_SKCIPHER: |
			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
			dev_dbg(dev, " unregistered cipher %s\n",
				driver_algs[i].alg.skcipher.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, " unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
4689 | dev_dbg(dev, " unregistered aead %s\n" , |
4690 | driver_algs[i].alg.aead.base.cra_driver_name); |
4691 | driver_algs[i].registered = false; |
4692 | break; |
4693 | } |
4694 | } |
4695 | spu_free_debugfs(); |
4696 | spu_mb_release(pdev); |
4697 | } |
4698 | |
4699 | /* ===== Kernel Module API ===== */ |
4700 | |
4701 | static struct platform_driver bcm_spu_pdriver = { |
4702 | .driver = { |
4703 | .name = "brcm-spu-crypto" , |
4704 | .of_match_table = of_match_ptr(bcm_spu_dt_ids), |
4705 | }, |
4706 | .probe = bcm_spu_probe, |
4707 | .remove_new = bcm_spu_remove, |
4708 | }; |
4709 | module_platform_driver(bcm_spu_pdriver); |
4710 | |
4711 | MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>" ); |
4712 | MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver" ); |
4713 | MODULE_LICENSE("GPL v2" ); |
4714 | |