1// SPDX-License-Identifier: GPL-2.0
2/*
3 * K3 SA2UL crypto accelerator driver
4 *
5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Authors: Keerthy
8 * Vitaly Andrianov
9 * Tero Kristo
10 */
11#include <linux/bitfield.h>
12#include <linux/clk.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/of_platform.h>
20#include <linux/platform_device.h>
21#include <linux/pm_runtime.h>
22
23#include <crypto/aes.h>
24#include <crypto/authenc.h>
25#include <crypto/des.h>
26#include <crypto/internal/aead.h>
27#include <crypto/internal/hash.h>
28#include <crypto/internal/skcipher.h>
29#include <crypto/scatterwalk.h>
30#include <crypto/sha1.h>
31#include <crypto/sha2.h>
32
33#include "sa2ul.h"
34
35/* Byte offset for key in encryption security context */
36#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
37/* Byte offset for Aux-1 in encryption security context */
38#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
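/*
 * Layout note (derived from sa_set_sc_enc() below): byte 0 of the
 * encryption security context holds the mode selector, bytes 1-27 the
 * MODE_CONTROL_BYTES of mode control instructions, and 4 bytes of
 * padding follow, so the key starts at byte offset 32 and Aux-1 at
 * byte offset 64.
 */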
39
40#define SA_CMDL_UPD_ENC 0x0001
41#define SA_CMDL_UPD_AUTH 0x0002
42#define SA_CMDL_UPD_ENC_IV 0x0004
43#define SA_CMDL_UPD_AUTH_IV 0x0008
44#define SA_CMDL_UPD_AUX_KEY 0x0010
45
46#define SA_AUTH_SUBKEY_LEN 16
47#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
48#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
49
50#define MODE_CONTROL_BYTES 27
51#define SA_HASH_PROCESSING 0
52#define SA_CRYPTO_PROCESSING 0
53#define SA_UPLOAD_HASH_TO_TLR BIT(6)
54
55#define SA_SW0_FLAGS_MASK 0xF0000
56#define SA_SW0_CMDL_INFO_MASK 0x1F00000
57#define SA_SW0_CMDL_PRESENT BIT(4)
58#define SA_SW0_ENG_ID_MASK 0x3E000000
59#define SA_SW0_DEST_INFO_PRESENT BIT(30)
60#define SA_SW2_EGRESS_LENGTH 0xFF000000
61#define SA_BASIC_HASH 0x10
62
63#define SHA256_DIGEST_WORDS 8
64/* Make 32-bit word from 4 bytes */
65#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
66 ((b2) << 8) | (b3))
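/* For example, SA_MK_U32(0x01, 0x02, 0x03, 0x04) evaluates to 0x01020304 */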
67
68/* size of SCCTL structure in bytes */
69#define SA_SCCTL_SZ 16
70
71/* Max Authentication tag size */
72#define SA_MAX_AUTH_TAG_SZ 64
73
74enum sa_algo_id {
75 SA_ALG_CBC_AES = 0,
76 SA_ALG_EBC_AES,
77 SA_ALG_CBC_DES3,
78 SA_ALG_ECB_DES3,
79 SA_ALG_SHA1,
80 SA_ALG_SHA256,
81 SA_ALG_SHA512,
82 SA_ALG_AUTHENC_SHA1_AES,
83 SA_ALG_AUTHENC_SHA256_AES,
84};
85
86struct sa_match_data {
87 u8 priv;
88 u8 priv_id;
89 u32 supported_algos;
90};
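/*
 * Each bit of sa_match_data::supported_algos is assumed to correspond to
 * one enum sa_algo_id value, i.e. BIT(SA_ALG_xxx) set means that
 * algorithm is usable on the matched SoC.
 */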
91
92static struct device *sa_k3_dev;
93
94/**
95 * struct sa_cmdl_cfg - Command label configuration descriptor
96 * @aalg: authentication algorithm ID
97 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
98 * @auth_eng_id: Authentication Engine ID
99 * @iv_size: Initialization Vector size
100 * @akey: Authentication key
101 * @akey_len: Authentication key length
102 * @enc: True, if this is an encode request
103 */
104struct sa_cmdl_cfg {
105 int aalg;
106 u8 enc_eng_id;
107 u8 auth_eng_id;
108 u8 iv_size;
109 const u8 *akey;
110 u16 akey_len;
111 bool enc;
112};
113
114/**
115 * struct algo_data - Crypto algorithm specific data
116 * @enc_eng: Encryption engine info structure
117 * @auth_eng: Authentication engine info structure
118 * @auth_ctrl: Authentication control word
119 * @hash_size: Size of digest
120 * @iv_idx: iv index in psdata
121 * @iv_out_size: iv out size
122 * @ealg_id: Encryption Algorithm ID
123 * @aalg_id: Authentication algorithm ID
124 * @mci_enc: Mode Control Instruction for Encryption algorithm
125 * @mci_dec: Mode Control Instruction for Decryption
126 * @inv_key: Whether the encryption algorithm demands key inversion
127 * @ctx: Pointer to the algorithm context
128 * @keyed_mac: Whether the authentication algorithm has key
129 * @prep_iopad: Function pointer to generate intermediate ipad/opad
130 */
131struct algo_data {
132 struct sa_eng_info enc_eng;
133 struct sa_eng_info auth_eng;
134 u8 auth_ctrl;
135 u8 hash_size;
136 u8 iv_idx;
137 u8 iv_out_size;
138 u8 ealg_id;
139 u8 aalg_id;
140 u8 *mci_enc;
141 u8 *mci_dec;
142 bool inv_key;
143 struct sa_tfm_ctx *ctx;
144 bool keyed_mac;
145 void (*prep_iopad)(struct algo_data *algo, const u8 *key,
146 u16 key_sz, __be32 *ipad, __be32 *opad);
147};
148
149/**
150 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
151 * @type: Type of the crypto algorithm.
152 * @alg: Union of crypto algorithm definitions.
153 * @registered: Flag indicating if the crypto algorithm is already registered
154 */
155struct sa_alg_tmpl {
156 u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
157 union {
158 struct skcipher_alg skcipher;
159 struct ahash_alg ahash;
160 struct aead_alg aead;
161 } alg;
162 bool registered;
163};
164
165/**
166 * struct sa_mapped_sg: scatterlist information for tx and rx
167 * @mapped: Set to true if the @sgt is mapped
168 * @dir: mapping direction used for @sgt
169 * @split_sg: Set if the sg is split and needs to be freed up
170 * @static_sg: Static scatterlist entry for overriding data
171 * @sgt: scatterlist table for DMA API use
172 */
173struct sa_mapped_sg {
174 bool mapped;
175 enum dma_data_direction dir;
176 struct scatterlist static_sg;
177 struct scatterlist *split_sg;
178 struct sg_table sgt;
179};
180/**
181 * struct sa_rx_data: RX Packet miscellaneous data place holder
182 * @req: crypto request data pointer
183 * @ddev: pointer to the DMA device
184 * @tx_in: dma_async_tx_descriptor pointer for rx channel
185 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
186 * @enc: Flag indicating either encryption or decryption
187 * @enc_iv_size: Initialisation vector size
188 * @iv_idx: Initialisation vector index
189 */
190struct sa_rx_data {
191 void *req;
192 struct device *ddev;
193 struct dma_async_tx_descriptor *tx_in;
194 struct sa_mapped_sg mapped_sg[2];
195 u8 enc;
196 u8 enc_iv_size;
197 u8 iv_idx;
198};
199
200/**
201 * struct sa_req: SA request definition
202 * @dev: device for the request
203 * @size: total data to be transmitted via DMA
204 * @enc_offset: offset of cipher data
205 * @enc_size: data to be passed to cipher engine
206 * @enc_iv: cipher IV
207 * @auth_offset: offset of the authentication data
208 * @auth_size: size of the authentication data
209 * @auth_iv: authentication IV
210 * @type: algorithm type for the request
211 * @cmdl: command label pointer
212 * @base: pointer to the base request
213 * @ctx: pointer to the algorithm context data
214 * @enc: true if this is an encode request
215 * @src: source data
216 * @dst: destination data
217 * @callback: DMA callback for the request
218 * @mdata_size: metadata size passed to DMA
219 */
220struct sa_req {
221 struct device *dev;
222 u16 size;
223 u8 enc_offset;
224 u16 enc_size;
225 u8 *enc_iv;
226 u8 auth_offset;
227 u16 auth_size;
228 u8 *auth_iv;
229 u32 type;
230 u32 *cmdl;
231 struct crypto_async_request *base;
232 struct sa_tfm_ctx *ctx;
233 bool enc;
234 struct scatterlist *src;
235 struct scatterlist *dst;
236 dma_async_tx_callback callback;
237 u16 mdata_size;
238};
239
240/*
241 * Mode Control Instructions for various Key lengths 128, 192, 256
242 * For CBC (Cipher Block Chaining) mode for encryption
243 */
244static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
245 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
248 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
251 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
254};
255
256/*
257 * Mode Control Instructions for various Key lengths 128, 192, 256
258 * For CBC (Cipher Block Chaining) mode for decryption
259 */
260static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
261 { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
264 { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
267 { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
270};
271
272/*
273 * Mode Control Instructions for various Key lengths 128, 192, 256
274 * For CBC (Cipher Block Chaining) mode for encryption
275 */
276static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
277 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
280 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
283 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
286};
287
288/*
289 * Mode Control Instructions for various Key lengths 128, 192, 256
290 * For CBC (Cipher Block Chaining) mode for decryption
291 */
292static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
293 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
296 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
299 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
302};
303
304/*
305 * Mode Control Instructions for various Key lengths 128, 192, 256
306 * For ECB (Electronic Code Book) mode for encryption
307 */
308static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
309 { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
312 { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
315 { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
318};
319
320/*
321 * Mode Control Instructions for various Key lengths 128, 192, 256
322 * For ECB (Electronic Code Book) mode for decryption
323 */
324static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
325 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
328 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
331 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
334};
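/*
 * The three rows of the AES mode control arrays above are selected by
 * key size: row 0 for 128-bit, row 1 for 192-bit and row 2 for 256-bit
 * keys, i.e. index (keylen >> 3) - 2 as computed in the AES setkey
 * handlers below.
 */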
335
336/*
337 * Mode Control Instructions for the 3DES algorithm,
338 * for CBC (Cipher Block Chaining) and ECB modes,
339 * encryption and decryption respectively
340 */
341static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
342 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00,
345};
346
347static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
348 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00,
351};
352
353static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
354 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
356 0x00, 0x00, 0x00,
357};
358
359static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
360 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00,
363};
364
365/*
366 * Perform 16-byte or 128-bit swizzling.
367 * The SA2UL expects the security context to be in little-endian
368 * format, and its bus width is 128 bits or 16 bytes.
369 * Hence swap 16 bytes at a time, from higher to lower address.
370 */
371static void sa_swiz_128(u8 *in, u16 len)
372{
373 u8 data[16];
374 int i, j;
375
376 for (i = 0; i < len; i += 16) {
377 memcpy(data, &in[i], 16);
378 for (j = 0; j < 16; j++)
379 in[i + j] = data[15 - j];
380 }
381}
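/*
 * Illustrative example: a 16-byte chunk 00 01 02 ... 0e 0f becomes
 * 0f 0e ... 01 00 after sa_swiz_128().
 */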
382
383/* Prepare the ipad and opad from the key, as per the HMAC algorithm (step 1) */
384static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
385{
386 int i;
387
388 for (i = 0; i < key_sz; i++)
389 k_ipad[i] = key[i] ^ 0x36;
390
391 /* Instead of XOR with 0 */
392 for (; i < SHA1_BLOCK_SIZE; i++)
393 k_ipad[i] = 0x36;
394}
395
396static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
397{
398 int i;
399
400 for (i = 0; i < key_sz; i++)
401 k_opad[i] = key[i] ^ 0x5c;
402
403 /* Instead of XOR with 0 */
404 for (; i < SHA1_BLOCK_SIZE; i++)
405 k_opad[i] = 0x5c;
406}
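/*
 * Note: 0x36 and 0x5c above are the standard HMAC ipad/opad constants
 * (RFC 2104). sa_prepare_iopads() below hashes one block of each padded
 * key and stores the resulting intermediate digests in the security
 * context in place of the raw authentication key.
 */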
407
408static void sa_export_shash(void *state, struct shash_desc *hash,
409 int digest_size, __be32 *out)
410{
411 struct sha1_state *sha1;
412 struct sha256_state *sha256;
413 u32 *result;
414
415 switch (digest_size) {
416 case SHA1_DIGEST_SIZE:
417 sha1 = state;
418 result = sha1->state;
419 break;
420 case SHA256_DIGEST_SIZE:
421 sha256 = state;
422 result = sha256->state;
423 break;
424 default:
425 dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
426 digest_size);
427 return;
428 }
429
430 crypto_shash_export(hash, state);
431
432 cpu_to_be32_array(out, result, digest_size / 4);
433}
434
435static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
436 u16 key_sz, __be32 *ipad, __be32 *opad)
437{
438 SHASH_DESC_ON_STACK(shash, data->ctx->shash);
439 int block_size = crypto_shash_blocksize(data->ctx->shash);
440 int digest_size = crypto_shash_digestsize(data->ctx->shash);
441 union {
442 struct sha1_state sha1;
443 struct sha256_state sha256;
444 u8 k_pad[SHA1_BLOCK_SIZE];
445 } sha;
446
447 shash->tfm = data->ctx->shash;
448
449 prepare_kipad(sha.k_pad, key, key_sz);
450
451 crypto_shash_init(shash);
452 crypto_shash_update(shash, sha.k_pad, block_size);
453 sa_export_shash(&sha, shash, digest_size, ipad);
454
455 prepare_kopad(sha.k_pad, key, key_sz);
456
457 crypto_shash_init(shash);
458 crypto_shash_update(shash, sha.k_pad, block_size);
459
460 sa_export_shash(&sha, shash, digest_size, opad);
461
462 memzero_explicit(&sha, sizeof(sha));
463}
464
465/* Derive the inverse key used in AES-CBC decryption operation */
466static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
467{
468 struct crypto_aes_ctx ctx;
469 int key_pos;
470
471 if (aes_expandkey(&ctx, key, key_sz)) {
472 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
473 return -EINVAL;
474 }
475
476 /* Workaround to get the right inverse key for AES_KEYSIZE_192 keys */
477 if (key_sz == AES_KEYSIZE_192) {
478 ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
479 ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
480 }
481
482 /* Based on the crypto_aes_expand_key() logic */
483 switch (key_sz) {
484 case AES_KEYSIZE_128:
485 case AES_KEYSIZE_192:
486 key_pos = key_sz + 24;
487 break;
488
489 case AES_KEYSIZE_256:
490 key_pos = key_sz + 24 - 4;
491 break;
492
493 default:
494 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
495 return -EINVAL;
496 }
497
498 memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
499 return 0;
500}
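/*
 * Note (inferred from the offsets used above, not from SA2UL
 * documentation): the "inverse key" copied out of the expanded key
 * schedule is its tail, i.e. the final round-key material that a
 * decryption operation starts from.
 */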
501
502/* Set Security context for the encryption engine */
503static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
504 u8 enc, u8 *sc_buf)
505{
506 const u8 *mci = NULL;
507
508 /* Set Encryption mode selector to crypto processing */
509 sc_buf[0] = SA_CRYPTO_PROCESSING;
510
511 if (enc)
512 mci = ad->mci_enc;
513 else
514 mci = ad->mci_dec;
515 /* Set the mode control instructions in security context */
516 if (mci)
517 memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
518
519 /* For AES-CBC decryption get the inverse key */
520 if (ad->inv_key && !enc) {
521 if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
522 return -EINVAL;
523 /* For all other cases: key is used */
524 } else {
525 memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
526 }
527
528 return 0;
529}
530
531/* Set Security context for the authentication engine */
532static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
533 u8 *sc_buf)
534{
535 __be32 *ipad = (void *)(sc_buf + 32);
536 __be32 *opad = (void *)(sc_buf + 64);
537
538 /* Set Authentication mode selector to hash processing */
539 sc_buf[0] = SA_HASH_PROCESSING;
540 /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
541 sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
542 sc_buf[1] |= ad->auth_ctrl;
543
544 /* Copy the keys or ipad/opad */
545 if (ad->keyed_mac)
546 ad->prep_iopad(ad, key, key_sz, ipad, opad);
547 else {
548 /* basic hash */
549 sc_buf[1] |= SA_BASIC_HASH;
550 }
551}
552
553static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
554{
555 int j;
556
557 for (j = 0; j < ((size16) ? 4 : 2); j++) {
558 *out = cpu_to_be32(*((u32 *)iv));
559 iv += 4;
560 out++;
561 }
562}
563
564/* Format general command label */
565static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
566 struct sa_cmdl_upd_info *upd_info)
567{
568 u8 enc_offset = 0, auth_offset = 0, total = 0;
569 u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
570 u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
571 u32 *word_ptr = (u32 *)cmdl;
572 int i;
573
574 /* Clear the command label */
575 memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
576
577 /* Initialize the command update structure */
578 memzero_explicit(upd_info, sizeof(*upd_info));
579
580 if (cfg->enc_eng_id && cfg->auth_eng_id) {
581 if (cfg->enc) {
582 auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
583 enc_next_eng = cfg->auth_eng_id;
584
585 if (cfg->iv_size)
586 auth_offset += cfg->iv_size;
587 } else {
588 enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
589 auth_next_eng = cfg->enc_eng_id;
590 }
591 }
592
593 if (cfg->enc_eng_id) {
594 upd_info->flags |= SA_CMDL_UPD_ENC;
595 upd_info->enc_size.index = enc_offset >> 2;
596 upd_info->enc_offset.index = upd_info->enc_size.index + 1;
597 /* Encryption command label */
598 cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
599
600 /* Encryption modes requiring IV */
601 if (cfg->iv_size) {
602 upd_info->flags |= SA_CMDL_UPD_ENC_IV;
603 upd_info->enc_iv.index =
604 (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
605 upd_info->enc_iv.size = cfg->iv_size;
606
607 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
608 SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
609
610 cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
611 (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
612 total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
613 } else {
614 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
615 SA_CMDL_HEADER_SIZE_BYTES;
616 total += SA_CMDL_HEADER_SIZE_BYTES;
617 }
618 }
619
620 if (cfg->auth_eng_id) {
621 upd_info->flags |= SA_CMDL_UPD_AUTH;
622 upd_info->auth_size.index = auth_offset >> 2;
623 upd_info->auth_offset.index = upd_info->auth_size.index + 1;
624 cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
625 cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
626 SA_CMDL_HEADER_SIZE_BYTES;
627 total += SA_CMDL_HEADER_SIZE_BYTES;
628 }
629
630 total = roundup(total, 8);
631
632 for (i = 0; i < total / 4; i++)
633 word_ptr[i] = swab32(word_ptr[i]);
634
635 return total;
636}
637
638/* Update Command label */
639static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
640 struct sa_cmdl_upd_info *upd_info)
641{
642 int i = 0, j;
643
644 if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
645 cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
646 cmdl[upd_info->enc_size.index] |= req->enc_size;
647 cmdl[upd_info->enc_offset.index] &=
648 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
649 cmdl[upd_info->enc_offset.index] |=
650 FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
651 req->enc_offset);
652
653 if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
654 __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
655 u32 *enc_iv = (u32 *)req->enc_iv;
656
657 for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
658 data[j] = cpu_to_be32(*enc_iv);
659 enc_iv++;
660 }
661 }
662 }
663
664 if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
665 cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
666 cmdl[upd_info->auth_size.index] |= req->auth_size;
667 cmdl[upd_info->auth_offset.index] &=
668 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
669 cmdl[upd_info->auth_offset.index] |=
670 FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
671 req->auth_offset);
672 if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
673 sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
674 req->auth_iv,
675 (upd_info->auth_iv.size > 8));
676 }
677 if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
678 int offset = (req->auth_size & 0xF) ? 4 : 0;
679
680 memcpy(&cmdl[upd_info->aux_key_info.index],
681 &upd_info->aux_key[offset], 16);
682 }
683 }
684}
685
686/* Format SWINFO words to be sent to SA */
687static
688void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
689 u8 cmdl_present, u8 cmdl_offset, u8 flags,
690 u8 hash_size, u32 *swinfo)
691{
692 swinfo[0] = sc_id;
693 swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
694 if (likely(cmdl_present))
695 swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
696 cmdl_offset | SA_SW0_CMDL_PRESENT);
697 swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
698
699 swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
700 swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
701 swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
702 swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
703}
704
705/* Dump the security context */
706static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
707{
708#ifdef DEBUG
709 dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
710 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
711 16, 1, buf, SA_CTX_MAX_SZ, false);
712#endif
713}
714
715static
716int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
717 const u8 *enc_key, u16 enc_key_sz,
718 const u8 *auth_key, u16 auth_key_sz,
719 struct algo_data *ad, u8 enc, u32 *swinfo)
720{
721 int enc_sc_offset = 0;
722 int auth_sc_offset = 0;
723 u8 *sc_buf = ctx->sc;
724 u16 sc_id = ctx->sc_id;
725 u8 first_engine = 0;
726
727 memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
728
729 if (ad->auth_eng.eng_id) {
730 if (enc)
731 first_engine = ad->enc_eng.eng_id;
732 else
733 first_engine = ad->auth_eng.eng_id;
734
735 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
736 auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
737 sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
738 if (!ad->hash_size)
739 return -EINVAL;
740 ad->hash_size = roundup(ad->hash_size, 8);
741
742 } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
743 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
744 first_engine = ad->enc_eng.eng_id;
745 sc_buf[1] = SA_SCCTL_FE_ENC;
746 ad->hash_size = ad->iv_out_size;
747 }
748
749 /* SCCTL Owner info: 0=host, 1=CP_ACE */
750 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
751 memcpy(&sc_buf[2], &sc_id, 2);
752 sc_buf[4] = 0x0;
753 sc_buf[5] = match_data->priv_id;
754 sc_buf[6] = match_data->priv;
755 sc_buf[7] = 0x0;
756
757 /* Prepare context for encryption engine */
758 if (ad->enc_eng.sc_size) {
759 if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
760 &sc_buf[enc_sc_offset]))
761 return -EINVAL;
762 }
763
764 /* Prepare context for authentication engine */
765 if (ad->auth_eng.sc_size)
766 sa_set_sc_auth(ad, auth_key, auth_key_sz,
767 &sc_buf[auth_sc_offset]);
768
769 /* Set the ownership of context to CP_ACE */
770 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
771
772 /* swizzle the security context */
773 sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
774
775 sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
776 SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
777
778 sa_dump_sc(sc_buf, ctx->sc_phys);
779
780 return 0;
781}
782
783/* Free the per direction context memory */
784static void sa_free_ctx_info(struct sa_ctx_info *ctx,
785 struct sa_crypto_data *data)
786{
787 unsigned long bn;
788
789 bn = ctx->sc_id - data->sc_id_start;
790 spin_lock(&data->scid_lock);
791 __clear_bit(bn, data->ctx_bm);
792 data->sc_id--;
793 spin_unlock(&data->scid_lock);
794
795 if (ctx->sc) {
796 dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
797 ctx->sc = NULL;
798 }
799}
800
801static int sa_init_ctx_info(struct sa_ctx_info *ctx,
802 struct sa_crypto_data *data)
803{
804 unsigned long bn;
805 int err;
806
807 spin_lock(&data->scid_lock);
808 bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
809 __set_bit(bn, data->ctx_bm);
810 data->sc_id++;
811 spin_unlock(&data->scid_lock);
812
813 ctx->sc_id = (u16)(data->sc_id_start + bn);
814
815 ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
816 if (!ctx->sc) {
817 dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
818 err = -ENOMEM;
819 goto scid_rollback;
820 }
821
822 return 0;
823
824scid_rollback:
825 spin_lock(&data->scid_lock);
826 __clear_bit(bn, data->ctx_bm);
827 data->sc_id--;
828 spin_unlock(&data->scid_lock);
829
830 return err;
831}
832
833static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
834{
835 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
836 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
837
838 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
839 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
840 ctx->dec.sc_id, &ctx->dec.sc_phys);
841
842 sa_free_ctx_info(&ctx->enc, data);
843 sa_free_ctx_info(&ctx->dec, data);
844
845 crypto_free_skcipher(ctx->fallback.skcipher);
846}
847
848static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
849{
850 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
851 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
852 const char *name = crypto_tfm_alg_name(&tfm->base);
853 struct crypto_skcipher *child;
854 int ret;
855
856 memzero_explicit(ctx, sizeof(*ctx));
857 ctx->dev_data = data;
858
859 ret = sa_init_ctx_info(&ctx->enc, data);
860 if (ret)
861 return ret;
862 ret = sa_init_ctx_info(&ctx->dec, data);
863 if (ret) {
864 sa_free_ctx_info(&ctx->enc, data);
865 return ret;
866 }
867
868 child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
869
870 if (IS_ERR(child)) {
871 dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
872 return PTR_ERR(child);
873 }
874
875 ctx->fallback.skcipher = child;
876 crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
877 sizeof(struct skcipher_request));
878
879 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
880 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
881 ctx->dec.sc_id, &ctx->dec.sc_phys);
882 return 0;
883}
884
885static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
886 unsigned int keylen, struct algo_data *ad)
887{
888 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
889 struct crypto_skcipher *child = ctx->fallback.skcipher;
890 int cmdl_len;
891 struct sa_cmdl_cfg cfg;
892 int ret;
893
894 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
895 keylen != AES_KEYSIZE_256)
896 return -EINVAL;
897
898 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
899 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
900
901 memzero_explicit(&cfg, sizeof(cfg));
902 cfg.enc_eng_id = ad->enc_eng.eng_id;
903 cfg.iv_size = crypto_skcipher_ivsize(tfm);
904
905 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
906 crypto_skcipher_set_flags(child, tfm->base.crt_flags &
907 CRYPTO_TFM_REQ_MASK);
908 ret = crypto_skcipher_setkey(child, key, keylen);
909 if (ret)
910 return ret;
911
912 /* Setup Encryption Security Context & Command label template */
913 if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
914 ad, 1, &ctx->enc.epib[1]))
915 goto badkey;
916
917 cmdl_len = sa_format_cmdl_gen(&cfg,
918 (u8 *)ctx->enc.cmdl,
919 &ctx->enc.cmdl_upd_info);
920 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
921 goto badkey;
922
923 ctx->enc.cmdl_size = cmdl_len;
924
925 /* Setup Decryption Security Context & Command label template */
926 if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
927 ad, 0, &ctx->dec.epib[1]))
928 goto badkey;
929
930 cfg.enc_eng_id = ad->enc_eng.eng_id;
931 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
932 &ctx->dec.cmdl_upd_info);
933
934 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
935 goto badkey;
936
937 ctx->dec.cmdl_size = cmdl_len;
938 ctx->iv_idx = ad->iv_idx;
939
940 return 0;
941
942badkey:
943 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
944 return -EINVAL;
945}
946
947static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
948 unsigned int keylen)
949{
950 struct algo_data ad = { 0 };
951 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
952 int key_idx = (keylen >> 3) - 2;
953
954 if (key_idx >= 3)
955 return -EINVAL;
956
957 ad.mci_enc = mci_cbc_enc_array[key_idx];
958 ad.mci_dec = mci_cbc_dec_array[key_idx];
959 ad.inv_key = true;
960 ad.ealg_id = SA_EALG_ID_AES_CBC;
961 ad.iv_idx = 4;
962 ad.iv_out_size = 16;
963
964 return sa_cipher_setkey(tfm, key, keylen, &ad);
965}
966
967static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
968 unsigned int keylen)
969{
970 struct algo_data ad = { 0 };
971 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
972 int key_idx = (keylen >> 3) - 2;
973
974 if (key_idx >= 3)
975 return -EINVAL;
976
977 ad.mci_enc = mci_ecb_enc_array[key_idx];
978 ad.mci_dec = mci_ecb_dec_array[key_idx];
979 ad.inv_key = true;
980 ad.ealg_id = SA_EALG_ID_AES_ECB;
981
982 return sa_cipher_setkey(tfm, key, keylen, &ad);
983}
984
985static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
986 unsigned int keylen)
987{
988 struct algo_data ad = { 0 };
989
990 ad.mci_enc = mci_cbc_3des_enc_array;
991 ad.mci_dec = mci_cbc_3des_dec_array;
992 ad.ealg_id = SA_EALG_ID_3DES_CBC;
993 ad.iv_idx = 6;
994 ad.iv_out_size = 8;
995
996 return sa_cipher_setkey(tfm, key, keylen, &ad);
997}
998
999static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
1000 unsigned int keylen)
1001{
1002 struct algo_data ad = { 0 };
1003
1004 ad.mci_enc = mci_ecb_3des_enc_array;
1005 ad.mci_dec = mci_ecb_3des_dec_array;
1006
1007 return sa_cipher_setkey(tfm, key, keylen, &ad);
1008}
1009
1010static void sa_sync_from_device(struct sa_rx_data *rxd)
1011{
1012 struct sg_table *sgt;
1013
1014 if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
1015 sgt = &rxd->mapped_sg[0].sgt;
1016 else
1017 sgt = &rxd->mapped_sg[1].sgt;
1018
1019 dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
1020}
1021
1022static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
1023{
1024 int i;
1025
1026 for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1027 struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1028
1029 if (mapped_sg->mapped) {
1030 dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1031 mapped_sg->dir, 0);
1032 kfree(mapped_sg->split_sg);
1033 }
1034 }
1035
1036 kfree(rxd);
1037}
1038
1039static void sa_aes_dma_in_callback(void *data)
1040{
1041 struct sa_rx_data *rxd = data;
1042 struct skcipher_request *req;
1043 u32 *result;
1044 __be32 *mdptr;
1045 size_t ml, pl;
1046 int i;
1047
1048 sa_sync_from_device(rxd);
1049 req = container_of(rxd->req, struct skcipher_request, base);
1050
1051 if (req->iv) {
1052 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1053 &ml);
1054 result = (u32 *)req->iv;
1055
1056 for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1057 result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1058 }
1059
1060 sa_free_sa_rx_data(rxd);
1061
1062 skcipher_request_complete(req, 0);
1063}
1064
1065static void
1066sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1067{
1068 u32 *out, *in;
1069 int i;
1070
1071 for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1072 *out++ = *in++;
1073
1074 mdptr[4] = (0xFFFF << 16);
1075 for (out = &mdptr[5], in = psdata, i = 0;
1076 i < pslen / sizeof(u32); i++)
1077 *out++ = *in++;
1078}
1079
1080static int sa_run(struct sa_req *req)
1081{
1082 struct sa_rx_data *rxd;
1083 gfp_t gfp_flags;
1084 u32 cmdl[SA_MAX_CMDL_WORDS];
1085 struct sa_crypto_data *pdata = dev_get_drvdata(dev: sa_k3_dev);
1086 struct device *ddev;
1087 struct dma_chan *dma_rx;
1088 int sg_nents, src_nents, dst_nents;
1089 struct scatterlist *src, *dst;
1090 size_t pl, ml, split_size;
1091 struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1092 int ret;
1093 struct dma_async_tx_descriptor *tx_out;
1094 u32 *mdptr;
1095 bool diff_dst;
1096 enum dma_data_direction dir_src;
1097 struct sa_mapped_sg *mapped_sg;
1098
1099 gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1100 GFP_KERNEL : GFP_ATOMIC;
1101
1102 rxd = kzalloc(sizeof(*rxd), gfp_flags);
1103 if (!rxd)
1104 return -ENOMEM;
1105
1106 if (req->src != req->dst) {
1107 diff_dst = true;
1108 dir_src = DMA_TO_DEVICE;
1109 } else {
1110 diff_dst = false;
1111 dir_src = DMA_BIDIRECTIONAL;
1112 }
1113
1114 /*
1115 * SA2UL has an interesting feature where the receive DMA channel
1116 * is selected based on the amount of data passed to the engine. Within the
1117 * transition range, there is also a space where it is impossible
1118 * to determine where the data will end up, and this should be
1119 * avoided. This will be handled by the SW fallback mechanism by
1120 * the individual algorithm implementations.
1121 */
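 /*
  * Note: the problematic range referred to above is
  * [SA_UNSAFE_DATA_SZ_MIN, SA_UNSAFE_DATA_SZ_MAX]; requests whose size
  * falls inside it are routed to the software fallback by
  * sa_cipher_run(), sa_sha_run() and sa_aead_run() below.
  */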
1122 if (req->size >= 256)
1123 dma_rx = pdata->dma_rx2;
1124 else
1125 dma_rx = pdata->dma_rx1;
1126
1127 ddev = dmaengine_get_dma_device(chan: pdata->dma_tx);
1128 rxd->ddev = ddev;
1129
1130 memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1131
1132 sa_update_cmdl(req, cmdl, upd_info: &sa_ctx->cmdl_upd_info);
1133
1134 if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1135 if (req->enc)
1136 req->type |=
1137 (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1138 else
1139 req->type |=
1140 (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1141 }
1142
1143 cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1144
1145 /*
1146 * Map the packets, first we check if the data fits into a single
1147 * sg entry and use that if possible. If it does not fit, we check
1148 * if we need to do sg_split to align the scatterlist data on the
1149 * actual data size being processed by the crypto engine.
1150 */
1151 src = req->src;
1152 sg_nents = sg_nents_for_len(sg: src, len: req->size);
1153
1154 split_size = req->size;
1155
1156 mapped_sg = &rxd->mapped_sg[0];
1157 if (sg_nents == 1 && split_size <= req->src->length) {
1158 src = &mapped_sg->static_sg;
1159 src_nents = 1;
1160 sg_init_table(src, 1);
1161 sg_set_page(sg: src, page: sg_page(sg: req->src), len: split_size,
1162 offset: req->src->offset);
1163
1164 mapped_sg->sgt.sgl = src;
1165 mapped_sg->sgt.orig_nents = src_nents;
1166 ret = dma_map_sgtable(dev: ddev, sgt: &mapped_sg->sgt, dir: dir_src, attrs: 0);
1167 if (ret) {
1168 kfree(objp: rxd);
1169 return ret;
1170 }
1171
1172 mapped_sg->dir = dir_src;
1173 mapped_sg->mapped = true;
1174 } else {
1175 mapped_sg->sgt.sgl = req->src;
1176 mapped_sg->sgt.orig_nents = sg_nents;
1177 ret = dma_map_sgtable(dev: ddev, sgt: &mapped_sg->sgt, dir: dir_src, attrs: 0);
1178 if (ret) {
1179 kfree(objp: rxd);
1180 return ret;
1181 }
1182
1183 mapped_sg->dir = dir_src;
1184 mapped_sg->mapped = true;
1185
1186 ret = sg_split(in: mapped_sg->sgt.sgl, in_mapped_nents: mapped_sg->sgt.nents, skip: 0, nb_splits: 1,
1187 split_sizes: &split_size, out: &src, out_mapped_nents: &src_nents, gfp_mask: gfp_flags);
1188 if (ret) {
1189 src_nents = mapped_sg->sgt.nents;
1190 src = mapped_sg->sgt.sgl;
1191 } else {
1192 mapped_sg->split_sg = src;
1193 }
1194 }
1195
1196 dma_sync_sgtable_for_device(dev: ddev, sgt: &mapped_sg->sgt, dir: DMA_TO_DEVICE);
1197
1198 if (!diff_dst) {
1199 dst_nents = src_nents;
1200 dst = src;
1201 } else {
1202 dst_nents = sg_nents_for_len(sg: req->dst, len: req->size);
1203 mapped_sg = &rxd->mapped_sg[1];
1204
1205 if (dst_nents == 1 && split_size <= req->dst->length) {
1206 dst = &mapped_sg->static_sg;
1207 dst_nents = 1;
1208 sg_init_table(dst, 1);
1209 sg_set_page(sg: dst, page: sg_page(sg: req->dst), len: split_size,
1210 offset: req->dst->offset);
1211
1212 mapped_sg->sgt.sgl = dst;
1213 mapped_sg->sgt.orig_nents = dst_nents;
1214 ret = dma_map_sgtable(dev: ddev, sgt: &mapped_sg->sgt,
1215 dir: DMA_FROM_DEVICE, attrs: 0);
1216 if (ret)
1217 goto err_cleanup;
1218
1219 mapped_sg->dir = DMA_FROM_DEVICE;
1220 mapped_sg->mapped = true;
1221 } else {
1222 mapped_sg->sgt.sgl = req->dst;
1223 mapped_sg->sgt.orig_nents = dst_nents;
1224 ret = dma_map_sgtable(dev: ddev, sgt: &mapped_sg->sgt,
1225 dir: DMA_FROM_DEVICE, attrs: 0);
1226 if (ret)
1227 goto err_cleanup;
1228
1229 mapped_sg->dir = DMA_FROM_DEVICE;
1230 mapped_sg->mapped = true;
1231
1232 ret = sg_split(in: mapped_sg->sgt.sgl, in_mapped_nents: mapped_sg->sgt.nents,
1233 skip: 0, nb_splits: 1, split_sizes: &split_size, out: &dst, out_mapped_nents: &dst_nents,
1234 gfp_mask: gfp_flags);
1235 if (ret) {
1236 dst_nents = mapped_sg->sgt.nents;
1237 dst = mapped_sg->sgt.sgl;
1238 } else {
1239 mapped_sg->split_sg = dst;
1240 }
1241 }
1242 }
1243
1244 rxd->tx_in = dmaengine_prep_slave_sg(chan: dma_rx, sgl: dst, sg_len: dst_nents,
1245 dir: DMA_DEV_TO_MEM,
1246 flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1247 if (!rxd->tx_in) {
1248 dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1249 ret = -EINVAL;
1250 goto err_cleanup;
1251 }
1252
1253 rxd->req = (void *)req->base;
1254 rxd->enc = req->enc;
1255 rxd->iv_idx = req->ctx->iv_idx;
1256 rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1257 rxd->tx_in->callback = req->callback;
1258 rxd->tx_in->callback_param = rxd;
1259
1260 tx_out = dmaengine_prep_slave_sg(chan: pdata->dma_tx, sgl: src,
1261 sg_len: src_nents, dir: DMA_MEM_TO_DEV,
1262 flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1263
1264 if (!tx_out) {
1265 dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1266 ret = -EINVAL;
1267 goto err_cleanup;
1268 }
1269
1270 /*
1271 * Prepare metadata for DMA engine. This essentially describes the
1272 * crypto algorithm to be used, data sizes, different keys etc.
1273 */
1274 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(desc: tx_out, payload_len: &pl, max_len: &ml);
1275
1276 sa_prepare_tx_desc(mdptr, pslen: (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1277 sizeof(u32))), psdata: cmdl, epiblen: sizeof(sa_ctx->epib),
1278 epib: sa_ctx->epib);
1279
1280 ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1281 dmaengine_desc_set_metadata_len(desc: tx_out, payload_len: req->mdata_size);
1282
1283 dmaengine_submit(desc: tx_out);
1284 dmaengine_submit(desc: rxd->tx_in);
1285
1286 dma_async_issue_pending(chan: dma_rx);
1287 dma_async_issue_pending(chan: pdata->dma_tx);
1288
1289 return -EINPROGRESS;
1290
1291err_cleanup:
1292 sa_free_sa_rx_data(rxd);
1293
1294 return ret;
1295}
1296
1297static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1298{
1299 struct sa_tfm_ctx *ctx =
1300 crypto_skcipher_ctx(tfm: crypto_skcipher_reqtfm(req));
1301 struct crypto_alg *alg = req->base.tfm->__crt_alg;
1302 struct sa_req sa_req = { 0 };
1303
1304 if (!req->cryptlen)
1305 return 0;
1306
1307 if (req->cryptlen % alg->cra_blocksize)
1308 return -EINVAL;
1309
1310 /* Use SW fallback if the data size is not supported */
1311 if (req->cryptlen > SA_MAX_DATA_SZ ||
1312 (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1313 req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1314 struct skcipher_request *subreq = skcipher_request_ctx(req);
1315
1316 skcipher_request_set_tfm(req: subreq, tfm: ctx->fallback.skcipher);
1317 skcipher_request_set_callback(req: subreq, flags: req->base.flags,
1318 compl: req->base.complete,
1319 data: req->base.data);
1320 skcipher_request_set_crypt(req: subreq, src: req->src, dst: req->dst,
1321 cryptlen: req->cryptlen, iv: req->iv);
1322 if (enc)
1323 return crypto_skcipher_encrypt(req: subreq);
1324 else
1325 return crypto_skcipher_decrypt(req: subreq);
1326 }
1327
1328 sa_req.size = req->cryptlen;
1329 sa_req.enc_size = req->cryptlen;
1330 sa_req.src = req->src;
1331 sa_req.dst = req->dst;
1332 sa_req.enc_iv = iv;
1333 sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1334 sa_req.enc = enc;
1335 sa_req.callback = sa_aes_dma_in_callback;
1336 sa_req.mdata_size = 44;
1337 sa_req.base = &req->base;
1338 sa_req.ctx = ctx;
1339
1340 return sa_run(req: &sa_req);
1341}
1342
1343static int sa_encrypt(struct skcipher_request *req)
1344{
1345 return sa_cipher_run(req, iv: req->iv, enc: 1);
1346}
1347
1348static int sa_decrypt(struct skcipher_request *req)
1349{
1350 return sa_cipher_run(req, iv: req->iv, enc: 0);
1351}
1352
1353static void sa_sha_dma_in_callback(void *data)
1354{
1355 struct sa_rx_data *rxd = data;
1356 struct ahash_request *req;
1357 struct crypto_ahash *tfm;
1358 unsigned int authsize;
1359 int i;
1360 size_t ml, pl;
1361 u32 *result;
1362 __be32 *mdptr;
1363
1364 sa_sync_from_device(rxd);
1365 req = container_of(rxd->req, struct ahash_request, base);
1366 tfm = crypto_ahash_reqtfm(req);
1367 authsize = crypto_ahash_digestsize(tfm);
1368
1369 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(desc: rxd->tx_in, payload_len: &pl, max_len: &ml);
1370 result = (u32 *)req->result;
1371
1372 for (i = 0; i < (authsize / 4); i++)
1373 result[i] = be32_to_cpu(mdptr[i + 4]);
1374
1375 sa_free_sa_rx_data(rxd);
1376
1377 ahash_request_complete(req, err: 0);
1378}
1379
1380static int zero_message_process(struct ahash_request *req)
1381{
1382 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1383 int sa_digest_size = crypto_ahash_digestsize(tfm);
1384
1385 switch (sa_digest_size) {
1386 case SHA1_DIGEST_SIZE:
1387 memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1388 break;
1389 case SHA256_DIGEST_SIZE:
1390 memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1391 break;
1392 case SHA512_DIGEST_SIZE:
1393 memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1394 break;
1395 default:
1396 return -EINVAL;
1397 }
1398
1399 return 0;
1400}
1401
1402static int sa_sha_run(struct ahash_request *req)
1403{
1404 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm: crypto_ahash_reqtfm(req));
1405 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1406 struct sa_req sa_req = { 0 };
1407 size_t auth_len;
1408
1409 auth_len = req->nbytes;
1410
1411 if (!auth_len)
1412 return zero_message_process(req);
1413
1414 if (auth_len > SA_MAX_DATA_SZ ||
1415 (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1416 auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1417 struct ahash_request *subreq = &rctx->fallback_req;
1418 int ret;
1419
1420 ahash_request_set_tfm(req: subreq, tfm: ctx->fallback.ahash);
1421 ahash_request_set_callback(req: subreq, flags: req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1422 ahash_request_set_crypt(req: subreq, src: req->src, result: req->result, nbytes: auth_len);
1423
1424 ret = crypto_ahash_digest(req: subreq);
1425
1426 return ret;
1427 }
1428
1429 sa_req.size = auth_len;
1430 sa_req.auth_size = auth_len;
1431 sa_req.src = req->src;
1432 sa_req.dst = req->src;
1433 sa_req.enc = true;
1434 sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1435 sa_req.callback = sa_sha_dma_in_callback;
1436 sa_req.mdata_size = 28;
1437 sa_req.ctx = ctx;
1438 sa_req.base = &req->base;
1439
1440 return sa_run(req: &sa_req);
1441}
1442
1443static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1444{
1445 int bs = crypto_shash_blocksize(tfm: ctx->shash);
1446 int cmdl_len;
1447 struct sa_cmdl_cfg cfg;
1448
1449 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1450 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1451 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1452
1453 memset(ctx->authkey, 0, bs);
1454 memset(&cfg, 0, sizeof(cfg));
1455 cfg.aalg = ad->aalg_id;
1456 cfg.enc_eng_id = ad->enc_eng.eng_id;
1457 cfg.auth_eng_id = ad->auth_eng.eng_id;
1458 cfg.iv_size = 0;
1459 cfg.akey = NULL;
1460 cfg.akey_len = 0;
1461
1462 ctx->dev_data = dev_get_drvdata(dev: sa_k3_dev);
1463 /* Setup Encryption Security Context & Command label template */
1464 if (sa_init_sc(ctx: &ctx->enc, match_data: ctx->dev_data->match_data, NULL, enc_key_sz: 0, NULL, auth_key_sz: 0,
1465 ad, enc: 0, swinfo: &ctx->enc.epib[1]))
1466 goto badkey;
1467
1468 cmdl_len = sa_format_cmdl_gen(cfg: &cfg,
1469 cmdl: (u8 *)ctx->enc.cmdl,
1470 upd_info: &ctx->enc.cmdl_upd_info);
1471 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1472 goto badkey;
1473
1474 ctx->enc.cmdl_size = cmdl_len;
1475
1476 return 0;
1477
1478badkey:
1479 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1480 return -EINVAL;
1481}
1482
1483static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1484{
1485 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1486 struct sa_crypto_data *data = dev_get_drvdata(dev: sa_k3_dev);
1487 int ret;
1488
1489 memset(ctx, 0, sizeof(*ctx));
1490 ctx->dev_data = data;
1491 ret = sa_init_ctx_info(ctx: &ctx->enc, data);
1492 if (ret)
1493 return ret;
1494
1495 if (alg_base) {
1496 ctx->shash = crypto_alloc_shash(alg_name: alg_base, type: 0, mask: 0);
1497 if (IS_ERR(ptr: ctx->shash)) {
1498 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1499 alg_base);
1500 return PTR_ERR(ptr: ctx->shash);
1501 }
1502 /* for fallback */
1503 ctx->fallback.ahash =
1504 crypto_alloc_ahash(alg_name: alg_base, type: 0, CRYPTO_ALG_ASYNC);
1505 if (IS_ERR(ptr: ctx->fallback.ahash)) {
1506 dev_err(ctx->dev_data->dev,
1507 "Could not load fallback driver\n");
1508 return PTR_ERR(ptr: ctx->fallback.ahash);
1509 }
1510 }
1511
1512 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1513 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1514 ctx->dec.sc_id, &ctx->dec.sc_phys);
1515
1516 crypto_ahash_set_reqsize(tfm: __crypto_ahash_cast(tfm),
1517 reqsize: sizeof(struct sa_sha_req_ctx) +
1518 crypto_ahash_reqsize(tfm: ctx->fallback.ahash));
1519
1520 return 0;
1521}
1522
1523static int sa_sha_digest(struct ahash_request *req)
1524{
1525 return sa_sha_run(req);
1526}
1527
1528static int sa_sha_init(struct ahash_request *req)
1529{
1530 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1531 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1532 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1533
1534 dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1535 crypto_ahash_digestsize(tfm), rctx);
1536
1537 ahash_request_set_tfm(req: &rctx->fallback_req, tfm: ctx->fallback.ahash);
1538 ahash_request_set_callback(req: &rctx->fallback_req, flags: req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1539 ahash_request_set_crypt(req: &rctx->fallback_req, NULL, NULL, nbytes: 0);
1540
1541 return crypto_ahash_init(req: &rctx->fallback_req);
1542}
1543
1544static int sa_sha_update(struct ahash_request *req)
1545{
1546 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1547
1548 ahash_request_set_callback(req: &rctx->fallback_req, flags: req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1549 ahash_request_set_crypt(req: &rctx->fallback_req, src: req->src, NULL, nbytes: req->nbytes);
1550
1551 return crypto_ahash_update(req: &rctx->fallback_req);
1552}
1553
1554static int sa_sha_final(struct ahash_request *req)
1555{
1556 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1557
1558 ahash_request_set_callback(req: &rctx->fallback_req, flags: req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1559 ahash_request_set_crypt(req: &rctx->fallback_req, NULL, result: req->result, nbytes: 0);
1560
1561 return crypto_ahash_final(req: &rctx->fallback_req);
1562}
1563
1564static int sa_sha_finup(struct ahash_request *req)
1565{
1566 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1567
1568 ahash_request_set_callback(req: &rctx->fallback_req, flags: req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1569 ahash_request_set_crypt(req: &rctx->fallback_req, src: req->src, result: req->result, nbytes: req->nbytes);
1570
1571 return crypto_ahash_finup(req: &rctx->fallback_req);
1572}
1573
1574static int sa_sha_import(struct ahash_request *req, const void *in)
1575{
1576 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1577 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1578 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1579
1580 ahash_request_set_tfm(req: &rctx->fallback_req, tfm: ctx->fallback.ahash);
1581 ahash_request_set_callback(req: &rctx->fallback_req, flags: req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1582
1583 return crypto_ahash_import(req: &rctx->fallback_req, in);
1584}
1585
1586static int sa_sha_export(struct ahash_request *req, void *out)
1587{
1588 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1589 struct ahash_request *subreq = &rctx->fallback_req;
1590
1591 ahash_request_set_callback(req: subreq, flags: req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
1592
1593 return crypto_ahash_export(req: subreq, out);
1594}
1595
1596static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1597{
1598 struct algo_data ad = { 0 };
1599 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1600
1601 sa_sha_cra_init_alg(tfm, alg_base: "sha1");
1602
1603 ad.aalg_id = SA_AALG_ID_SHA1;
1604 ad.hash_size = SHA1_DIGEST_SIZE;
1605 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1606
1607 sa_sha_setup(ctx, ad: &ad);
1608
1609 return 0;
1610}
1611
1612static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1613{
1614 struct algo_data ad = { 0 };
1615 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1616
1617 sa_sha_cra_init_alg(tfm, alg_base: "sha256");
1618
1619 ad.aalg_id = SA_AALG_ID_SHA2_256;
1620 ad.hash_size = SHA256_DIGEST_SIZE;
1621 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1622
1623 sa_sha_setup(ctx, ad: &ad);
1624
1625 return 0;
1626}
1627
1628static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1629{
1630 struct algo_data ad = { 0 };
1631 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1632
1633 sa_sha_cra_init_alg(tfm, alg_base: "sha512");
1634
1635 ad.aalg_id = SA_AALG_ID_SHA2_512;
1636 ad.hash_size = SHA512_DIGEST_SIZE;
1637 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1638
1639 sa_sha_setup(ctx, ad: &ad);
1640
1641 return 0;
1642}
1643
1644static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1645{
1646 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1647 struct sa_crypto_data *data = dev_get_drvdata(dev: sa_k3_dev);
1648
1649 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1650 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1651 ctx->dec.sc_id, &ctx->dec.sc_phys);
1652
1653 if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1654 sa_free_ctx_info(ctx: &ctx->enc, data);
1655
1656 crypto_free_shash(tfm: ctx->shash);
1657 crypto_free_ahash(tfm: ctx->fallback.ahash);
1658}
1659
1660static void sa_aead_dma_in_callback(void *data)
1661{
1662 struct sa_rx_data *rxd = data;
1663 struct aead_request *req;
1664 struct crypto_aead *tfm;
1665 unsigned int start;
1666 unsigned int authsize;
1667 u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1668 size_t pl, ml;
1669 int i;
1670 int err = 0;
1671 u32 *mdptr;
1672
1673 sa_sync_from_device(rxd);
1674 req = container_of(rxd->req, struct aead_request, base);
1675 tfm = crypto_aead_reqtfm(req);
1676 start = req->assoclen + req->cryptlen;
1677 authsize = crypto_aead_authsize(tfm);
1678
1679 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(desc: rxd->tx_in, payload_len: &pl, max_len: &ml);
1680 for (i = 0; i < (authsize / 4); i++)
1681 mdptr[i + 4] = swab32(mdptr[i + 4]);
1682
1683 if (rxd->enc) {
1684 scatterwalk_map_and_copy(buf: &mdptr[4], sg: req->dst, start, nbytes: authsize,
1685 out: 1);
1686 } else {
1687 start -= authsize;
1688 scatterwalk_map_and_copy(buf: auth_tag, sg: req->src, start, nbytes: authsize,
1689 out: 0);
1690
1691 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1692 }
1693
1694 sa_free_sa_rx_data(rxd);
1695
1696 aead_request_complete(req, err);
1697}
1698
1699static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1700 const char *fallback)
1701{
1702 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1703 struct sa_crypto_data *data = dev_get_drvdata(dev: sa_k3_dev);
1704 int ret;
1705
1706 memzero_explicit(s: ctx, count: sizeof(*ctx));
1707 ctx->dev_data = data;
1708
1709 ctx->shash = crypto_alloc_shash(alg_name: hash, type: 0, CRYPTO_ALG_NEED_FALLBACK);
1710 if (IS_ERR(ptr: ctx->shash)) {
1711 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1712 return PTR_ERR(ptr: ctx->shash);
1713 }
1714
1715 ctx->fallback.aead = crypto_alloc_aead(alg_name: fallback, type: 0,
1716 CRYPTO_ALG_NEED_FALLBACK);
1717
1718 if (IS_ERR(ptr: ctx->fallback.aead)) {
1719 dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1720 fallback);
1721 return PTR_ERR(ptr: ctx->fallback.aead);
1722 }
1723
1724 crypto_aead_set_reqsize(aead: tfm, reqsize: sizeof(struct aead_request) +
1725 crypto_aead_reqsize(tfm: ctx->fallback.aead));
1726
1727 ret = sa_init_ctx_info(ctx: &ctx->enc, data);
1728 if (ret)
1729 return ret;
1730
1731 ret = sa_init_ctx_info(ctx: &ctx->dec, data);
1732 if (ret) {
1733 sa_free_ctx_info(ctx: &ctx->enc, data);
1734 return ret;
1735 }
1736
1737 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1738 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1739 ctx->dec.sc_id, &ctx->dec.sc_phys);
1740
1741 return ret;
1742}
1743
1744static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1745{
1746 return sa_cra_init_aead(tfm, hash: "sha1",
1747 fallback: "authenc(hmac(sha1-ce),cbc(aes-ce))");
1748}
1749
1750static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1751{
1752 return sa_cra_init_aead(tfm, hash: "sha256",
1753 fallback: "authenc(hmac(sha256-ce),cbc(aes-ce))");
1754}
1755
1756static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1757{
1758 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1759 struct sa_crypto_data *data = dev_get_drvdata(dev: sa_k3_dev);
1760
1761 crypto_free_shash(tfm: ctx->shash);
1762 crypto_free_aead(tfm: ctx->fallback.aead);
1763
1764 sa_free_ctx_info(ctx: &ctx->enc, data);
1765 sa_free_ctx_info(ctx: &ctx->dec, data);
1766}

/* AEAD algorithm configuration interface function */
static int sa_aead_setkey(struct crypto_aead *authenc,
			  const u8 *key, unsigned int keylen,
			  struct algo_data *ad)
{
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;
	int cmdl_len;
	struct sa_cmdl_cfg cfg;
	int key_idx;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		return -EINVAL;

	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
	key_idx = (keys.enckeylen >> 3) - 2;
	if (key_idx >= 3)
		return -EINVAL;

	ad->ctx = ctx;
	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
	ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
	ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
	ad->inv_key = true;
	ad->keyed_mac = true;
	ad->ealg_id = SA_EALG_ID_AES_CBC;
	ad->prep_iopad = sa_prepare_iopads;

	memset(&cfg, 0, sizeof(cfg));
	cfg.enc = true;
	cfg.aalg = ad->aalg_id;
	cfg.enc_eng_id = ad->enc_eng.eng_id;
	cfg.auth_eng_id = ad->auth_eng.eng_id;
	cfg.iv_size = crypto_aead_ivsize(authenc);
	cfg.akey = keys.authkey;
	cfg.akey_len = keys.authkeylen;

	/* Setup Encryption Security Context & Command label template */
	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
		       keys.enckeylen, keys.authkey, keys.authkeylen,
		       ad, 1, &ctx->enc.epib[1]))
		return -EINVAL;

	cmdl_len = sa_format_cmdl_gen(&cfg,
				      (u8 *)ctx->enc.cmdl,
				      &ctx->enc.cmdl_upd_info);
	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
		return -EINVAL;

	ctx->enc.cmdl_size = cmdl_len;

	/* Setup Decryption Security Context & Command label template */
	if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
		       keys.enckeylen, keys.authkey, keys.authkeylen,
		       ad, 0, &ctx->dec.epib[1]))
		return -EINVAL;

	cfg.enc = false;
	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
				      &ctx->dec.cmdl_upd_info);

	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
		return -EINVAL;

	ctx->dec.cmdl_size = cmdl_len;

	crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(ctx->fallback.aead,
			      crypto_aead_get_flags(authenc) &
			      CRYPTO_TFM_REQ_MASK);

	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
}

static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

	return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
}

static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
				   const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
	ad.hash_size = SHA1_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}

static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
				     const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
	ad.hash_size = SHA256_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}

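/*
 * Common AEAD request handling: requests the hardware cannot process
 * (larger than SA_MAX_DATA_SZ, or inside the unsafe size window) are
 * handed to the software fallback AEAD; all others are packaged into a
 * struct sa_req and submitted to the engine through sa_run().
 */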
static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
	struct sa_req sa_req = { 0 };
	size_t auth_size, enc_size;

	enc_size = req->cryptlen;
	auth_size = req->assoclen + req->cryptlen;

	if (!enc) {
		enc_size -= crypto_aead_authsize(tfm);
		auth_size -= crypto_aead_authsize(tfm);
	}

	if (auth_size > SA_MAX_DATA_SZ ||
	    (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
	     auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
		struct aead_request *subreq = aead_request_ctx(req);
		int ret;

		aead_request_set_tfm(subreq, ctx->fallback.aead);
		aead_request_set_callback(subreq, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(subreq, req->src, req->dst,
				       req->cryptlen, req->iv);
		aead_request_set_ad(subreq, req->assoclen);

		ret = enc ? crypto_aead_encrypt(subreq) :
			crypto_aead_decrypt(subreq);
		return ret;
	}

	sa_req.enc_offset = req->assoclen;
	sa_req.enc_size = enc_size;
	sa_req.auth_size = auth_size;
	sa_req.size = auth_size;
	sa_req.enc_iv = iv;
	sa_req.type = CRYPTO_ALG_TYPE_AEAD;
	sa_req.enc = enc;
	sa_req.callback = sa_aead_dma_in_callback;
	sa_req.mdata_size = 52;
	sa_req.base = &req->base;
	sa_req.ctx = ctx;
	sa_req.src = req->src;
	sa_req.dst = req->dst;

	return sa_run(&sa_req);
}

/* AEAD algorithm encrypt interface function */
static int sa_aead_encrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 1);
}

/* AEAD algorithm decrypt interface function */
static int sa_aead_decrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 0);
}

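/* Algorithm templates exposed to the crypto API, indexed by enum sa_algo_id */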
static struct sa_alg_tmpl sa_algs[] = {
	[SA_ALG_CBC_AES] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = sa_aes_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_EBC_AES] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(aes)",
			.base.cra_driver_name = "ecb-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = sa_aes_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_CBC_DES3] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des3_ede)",
			.base.cra_driver_name = "cbc-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = sa_3des_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_ECB_DES3] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des3_ede)",
			.base.cra_driver_name = "ecb-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.setkey = sa_3des_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_SHA1] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha1_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha1_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_SHA256] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha256_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha256_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_SHA512] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha512_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha512_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_AUTHENC_SHA1_AES] = {
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha1),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha1,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha1_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
	[SA_ALG_AUTHENC_SHA256_AES] = {
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha256),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha256,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha256_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
};

/* Register the algorithms in crypto framework */
static void sa_register_algos(struct sa_crypto_data *dev_data)
{
	const struct sa_match_data *match_data = dev_data->match_data;
	struct device *dev = dev_data->dev;
	char *alg_name;
	u32 type;
	int i, err;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		/* Skip unsupported algos */
		if (!(match_data->supported_algos & BIT(i)))
			continue;

		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
			err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
			alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
			err = crypto_register_ahash(&sa_algs[i].alg.ahash);
		} else if (type == CRYPTO_ALG_TYPE_AEAD) {
			alg_name = sa_algs[i].alg.aead.base.cra_name;
			err = crypto_register_aead(&sa_algs[i].alg.aead);
		} else {
			dev_err(dev,
				"un-supported crypto algorithm (%d)",
				sa_algs[i].type);
			continue;
		}

		if (err)
			dev_err(dev, "Failed to register '%s'\n", alg_name);
		else
			sa_algs[i].registered = true;
	}
}

/* Unregister the algorithms in crypto framework */
static void sa_unregister_algos(const struct device *dev)
{
	u32 type;
	int i;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		type = sa_algs[i].type;
		if (!sa_algs[i].registered)
			continue;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
		else if (type == CRYPTO_ALG_TYPE_AHASH)
			crypto_unregister_ahash(&sa_algs[i].alg.ahash);
		else if (type == CRYPTO_ALG_TYPE_AEAD)
			crypto_unregister_aead(&sa_algs[i].alg.aead);

		sa_algs[i].registered = false;
	}
}

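/* Create the DMA pool used for allocating security context buffers */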
static int sa_init_mem(struct sa_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;
	/* Setup dma pool for security context buffers */
	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
					    SA_CTX_MAX_SZ, 64, 0);
	if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool");
		return -ENOMEM;
	}

	return 0;
}

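/*
 * Request the rx1/rx2/tx DMA channels and apply a common slave
 * configuration (4-byte bus width, burst size of 4) to each of them.
 */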
static int sa_dma_init(struct sa_crypto_data *dd)
{
	int ret;
	struct dma_slave_config cfg;

	dd->dma_rx1 = NULL;
	dd->dma_tx = NULL;
	dd->dma_rx2 = NULL;

	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
	if (ret)
		return ret;

	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
	if (IS_ERR(dd->dma_rx1))
		return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
				     "Unable to request rx1 DMA channel\n");

	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
	if (IS_ERR(dd->dma_rx2)) {
		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
				    "Unable to request rx2 DMA channel\n");
		goto err_dma_rx2;
	}

	dd->dma_tx = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_tx)) {
		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
				    "Unable to request tx DMA channel\n");
		goto err_dma_tx;
	}

	memzero_explicit(&cfg, sizeof(cfg));

	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 4;
	cfg.dst_maxburst = 4;

	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	return 0;

err_dma_config:
	dma_release_channel(dd->dma_tx);
err_dma_tx:
	dma_release_channel(dd->dma_rx2);
err_dma_rx2:
	dma_release_channel(dd->dma_rx1);

	return ret;
}

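/*
 * Make each child device populated from the device tree (such as an RNG
 * sub-node, if present) a consumer of the SA2UL parent via a device link.
 */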
static int sa_link_child(struct device *dev, void *data)
{
	struct device *parent = data;

	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);

	return 0;
}

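/*
 * Per-SoC configuration: security-context priv/priv_id values and the set
 * of algorithms supported by the instance.
 */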
static struct sa_match_data am654_match_data = {
	.priv = 1,
	.priv_id = 1,
	.supported_algos = BIT(SA_ALG_CBC_AES) |
			   BIT(SA_ALG_EBC_AES) |
			   BIT(SA_ALG_CBC_DES3) |
			   BIT(SA_ALG_ECB_DES3) |
			   BIT(SA_ALG_SHA1) |
			   BIT(SA_ALG_SHA256) |
			   BIT(SA_ALG_SHA512) |
			   BIT(SA_ALG_AUTHENC_SHA1_AES) |
			   BIT(SA_ALG_AUTHENC_SHA256_AES),
};

static struct sa_match_data am64_match_data = {
	.priv = 0,
	.priv_id = 0,
	.supported_algos = BIT(SA_ALG_CBC_AES) |
			   BIT(SA_ALG_EBC_AES) |
			   BIT(SA_ALG_SHA256) |
			   BIT(SA_ALG_SHA512) |
			   BIT(SA_ALG_AUTHENC_SHA256_AES),
};

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
	{ .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
	{ .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
	{ .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

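/*
 * Probe: map the SA2UL registers, enable runtime PM, set up the security
 * context DMA pool and DMA channels, enable the engines that are not
 * already running, register the supported algorithms and populate any
 * child devices described in the device tree.
 */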
static int sa_ul_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	static void __iomem *saul_base;
	struct sa_crypto_data *dev_data;
	u32 status, val;
	int ret;

	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	dev_data->match_data = of_device_get_match_data(dev);
	if (!dev_data->match_data)
		return -ENODEV;

	saul_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(saul_base))
		return PTR_ERR(saul_base);

	sa_k3_dev = dev;
	dev_data->dev = dev;
	dev_data->pdev = pdev;
	dev_data->base = saul_base;
	platform_set_drvdata(pdev, dev_data);
	dev_set_drvdata(sa_k3_dev, dev_data);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
		pm_runtime_disable(dev);
		return ret;
	}

	ret = sa_init_mem(dev_data);
	if (ret)
		goto destroy_dma_pool;

	ret = sa_dma_init(dev_data);
	if (ret)
		goto destroy_dma_pool;

	spin_lock_init(&dev_data->scid_lock);

	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
	      SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
	      SA_EEC_TRNG_EN;
	status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
	/* Only enable engines if all are not already enabled */
	if (val & ~status)
		writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);

	sa_register_algos(dev_data);

	ret = of_platform_populate(node, NULL, NULL, dev);
	if (ret)
		goto release_dma;

	device_for_each_child(dev, dev, sa_link_child);

	return 0;

release_dma:
	sa_unregister_algos(dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

destroy_dma_pool:
	dma_pool_destroy(dev_data->sc_pool);

	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}

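/*
 * Undo probe: depopulate child devices, unregister the algorithms and
 * release the DMA channels, DMA pool and runtime PM references.
 */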
static void sa_ul_remove(struct platform_device *pdev)
{
	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	sa_unregister_algos(&pdev->dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

	dma_pool_destroy(dev_data->sc_pool);

	platform_set_drvdata(pdev, NULL);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver sa_ul_driver = {
	.probe = sa_ul_probe,
	.remove = sa_ul_remove,
	.driver = {
		.name = "saul-crypto",
		.of_match_table = of_match,
	},
};
module_platform_driver(sa_ul_driver);
MODULE_DESCRIPTION("K3 SA2UL crypto accelerator driver");
MODULE_LICENSE("GPL v2");
