// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

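/* per-request context, used only by the XTS software fallback path */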
struct caam_skcipher_req_ctx {
	struct skcipher_request fallback_req;
};

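/*
 * Construct the AEAD shared descriptors (encrypt, decrypt and, for geniv
 * algorithms, givencrypt) for the current session keys and authsize.
 */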
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

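/*
 * Set the authenc key: split the key material into an authentication key
 * and an encryption key. On Era 6+ hardware the split key is derived by
 * DKP inside the shared descriptor itself; on older hardware it is
 * generated up front via gen_split_key().
 */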
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);

	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

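/*
 * Construct the GCM shared descriptors. The key is inlined into the
 * descriptor when it fits, otherwise it is referenced by DMA address.
 */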
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

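/*
 * Build the skcipher encrypt/decrypt shared descriptors for the given key.
 * The key is always inlined in the descriptor, and any already-created
 * driver contexts are refreshed with the new descriptors.
 */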
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

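/*
 * Set the AES-XTS key. Keys whose size is neither 2 * AES_KEYSIZE_128 nor
 * 2 * AES_KEYSIZE_256 are flagged for the software fallback; on Era <= 8
 * hardware the key is also programmed into the fallback tfm, since some
 * requests may have to be handled off the accelerator.
 */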
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	int ret = 0;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(jrdev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};

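/*
 * Look up (and lazily create) the driver context for the requested
 * operation type; creation is serialized by ctx->lock so only one core
 * initializes each context.
 */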
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

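/* Unmap the S/G lists, the IV and the h/w link table for a request */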
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

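/* AEAD completion callback, invoked by the QI backend when CAAM finishes */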
static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents <= 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

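/* Common AEAD encrypt/decrypt path: build the edesc and enqueue it to QI */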
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 false);
}

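/*
 * The IV is copied right after the h/w link table, at the next
 * cache-line-aligned offset, so that it sits in its own DMAable area.
 */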
static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
{
	return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
			 dma_get_cache_alignment());
}

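/* skcipher completion callback, invoked by the QI backend */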
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		ecode = caam_jr_strstatus(qidev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

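/*
 * allocate and map the skcipher extended descriptor
 */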
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	unsigned int len;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);

	len = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes;
	len = ALIGN(len, dma_get_cache_alignment());
	len += ivsize;

	if (unlikely(len > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = skcipher_edesc_iv(edesc);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);
	else
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);

	return edesc;
}

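/*
 * Return true if the second half of the XTS IV (bytes 8..15 of the tweak)
 * is non-zero; on Era <= 8 hardware such requests are routed to the
 * software fallback in skcipher_crypt() below.
 */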
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}

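/*
 * Common skcipher encrypt/decrypt path. XTS requests the accelerator
 * cannot handle (non-standard key sizes, or a non-zero upper tweak half
 * on Era <= 8 hardware) are handed to the software fallback tfm.
 */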
1420 | static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) |
1421 | { |
1422 | struct skcipher_edesc *edesc; |
1423 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1424 | struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm: skcipher); |
1425 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev: ctx->jrdev->parent); |
1426 | int ret; |
1427 | |
1428 | /* |
1429 | * XTS is expected to return an error even for input length = 0 |
1430 | * Note that the case input length < block size will be caught during |
1431 | * HW offloading and return an error. |
1432 | */ |
1433 | if (!req->cryptlen && !ctx->fallback) |
1434 | return 0; |
1435 | |
1436 | if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) || |
1437 | ctx->xts_key_fallback)) { |
1438 | struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); |
1439 | |
1440 | skcipher_request_set_tfm(req: &rctx->fallback_req, tfm: ctx->fallback); |
1441 | skcipher_request_set_callback(req: &rctx->fallback_req, |
1442 | flags: req->base.flags, |
1443 | compl: req->base.complete, |
1444 | data: req->base.data); |
1445 | skcipher_request_set_crypt(req: &rctx->fallback_req, src: req->src, |
1446 | dst: req->dst, cryptlen: req->cryptlen, iv: req->iv); |
1447 | |
1448 | return encrypt ? crypto_skcipher_encrypt(req: &rctx->fallback_req) : |
1449 | crypto_skcipher_decrypt(req: &rctx->fallback_req); |
1450 | } |
1451 | |
1452 | if (unlikely(caam_congested)) |
1453 | return -EAGAIN; |
1454 | |
1455 | /* allocate extended descriptor */ |
1456 | edesc = skcipher_edesc_alloc(req, encrypt); |
1457 | if (IS_ERR(ptr: edesc)) |
1458 | return PTR_ERR(ptr: edesc); |
1459 | |
1460 | ret = caam_qi_enqueue(qidev: ctx->qidev, req: &edesc->drv_req); |
1461 | if (!ret) { |
1462 | ret = -EINPROGRESS; |
1463 | } else { |
1464 | skcipher_unmap(dev: ctx->qidev, edesc, req); |
1465 | qi_cache_free(obj: edesc); |
1466 | } |
1467 | |
1468 | return ret; |
1469 | } |
1470 | |
1471 | static int skcipher_encrypt(struct skcipher_request *req) |
1472 | { |
1473 | return skcipher_crypt(req, encrypt: true); |
1474 | } |
1475 | |
1476 | static int skcipher_decrypt(struct skcipher_request *req) |
1477 | { |
1478 | return skcipher_crypt(req, encrypt: false); |
1479 | } |
1480 | |
1481 | static struct caam_skcipher_alg driver_algs[] = { |
1482 | { |
1483 | .skcipher = { |
1484 | .base = { |
1485 | .cra_name = "cbc(aes)" , |
1486 | .cra_driver_name = "cbc-aes-caam-qi" , |
1487 | .cra_blocksize = AES_BLOCK_SIZE, |
1488 | }, |
1489 | .setkey = aes_skcipher_setkey, |
1490 | .encrypt = skcipher_encrypt, |
1491 | .decrypt = skcipher_decrypt, |
1492 | .min_keysize = AES_MIN_KEY_SIZE, |
1493 | .max_keysize = AES_MAX_KEY_SIZE, |
1494 | .ivsize = AES_BLOCK_SIZE, |
1495 | }, |
1496 | .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1497 | }, |
1498 | { |
1499 | .skcipher = { |
1500 | .base = { |
1501 | .cra_name = "cbc(des3_ede)" , |
1502 | .cra_driver_name = "cbc-3des-caam-qi" , |
1503 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1504 | }, |
1505 | .setkey = des3_skcipher_setkey, |
1506 | .encrypt = skcipher_encrypt, |
1507 | .decrypt = skcipher_decrypt, |
1508 | .min_keysize = DES3_EDE_KEY_SIZE, |
1509 | .max_keysize = DES3_EDE_KEY_SIZE, |
1510 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1511 | }, |
1512 | .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
1513 | }, |
1514 | { |
1515 | .skcipher = { |
1516 | .base = { |
1517 | .cra_name = "cbc(des)" , |
1518 | .cra_driver_name = "cbc-des-caam-qi" , |
1519 | .cra_blocksize = DES_BLOCK_SIZE, |
1520 | }, |
1521 | .setkey = des_skcipher_setkey, |
1522 | .encrypt = skcipher_encrypt, |
1523 | .decrypt = skcipher_decrypt, |
1524 | .min_keysize = DES_KEY_SIZE, |
1525 | .max_keysize = DES_KEY_SIZE, |
1526 | .ivsize = DES_BLOCK_SIZE, |
1527 | }, |
1528 | .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
1529 | }, |
1530 | { |
1531 | .skcipher = { |
1532 | .base = { |
1533 | .cra_name = "ctr(aes)" , |
1534 | .cra_driver_name = "ctr-aes-caam-qi" , |
1535 | .cra_blocksize = 1, |
1536 | }, |
1537 | .setkey = ctr_skcipher_setkey, |
1538 | .encrypt = skcipher_encrypt, |
1539 | .decrypt = skcipher_decrypt, |
1540 | .min_keysize = AES_MIN_KEY_SIZE, |
1541 | .max_keysize = AES_MAX_KEY_SIZE, |
1542 | .ivsize = AES_BLOCK_SIZE, |
1543 | .chunksize = AES_BLOCK_SIZE, |
1544 | }, |
1545 | .caam.class1_alg_type = OP_ALG_ALGSEL_AES | |
1546 | OP_ALG_AAI_CTR_MOD128, |
1547 | }, |
1548 | { |
1549 | .skcipher = { |
1550 | .base = { |
1551 | .cra_name = "rfc3686(ctr(aes))" , |
1552 | .cra_driver_name = "rfc3686-ctr-aes-caam-qi" , |
1553 | .cra_blocksize = 1, |
1554 | }, |
1555 | .setkey = rfc3686_skcipher_setkey, |
1556 | .encrypt = skcipher_encrypt, |
1557 | .decrypt = skcipher_decrypt, |
1558 | .min_keysize = AES_MIN_KEY_SIZE + |
1559 | CTR_RFC3686_NONCE_SIZE, |
1560 | .max_keysize = AES_MAX_KEY_SIZE + |
1561 | CTR_RFC3686_NONCE_SIZE, |
1562 | .ivsize = CTR_RFC3686_IV_SIZE, |
1563 | .chunksize = AES_BLOCK_SIZE, |
1564 | }, |
1565 | .caam = { |
1566 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
1567 | OP_ALG_AAI_CTR_MOD128, |
1568 | .rfc3686 = true, |
1569 | }, |
1570 | }, |
1571 | { |
1572 | .skcipher = { |
1573 | .base = { |
1574 | .cra_name = "xts(aes)" , |
1575 | .cra_driver_name = "xts-aes-caam-qi" , |
1576 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
1577 | .cra_blocksize = AES_BLOCK_SIZE, |
1578 | }, |
1579 | .setkey = xts_skcipher_setkey, |
1580 | .encrypt = skcipher_encrypt, |
1581 | .decrypt = skcipher_decrypt, |
1582 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
1583 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
1584 | .ivsize = AES_BLOCK_SIZE, |
1585 | }, |
1586 | .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, |
1587 | }, |
1588 | }; |
1589 | |
1590 | static struct caam_aead_alg driver_aeads[] = { |
1591 | { |
1592 | .aead = { |
1593 | .base = { |
1594 | .cra_name = "rfc4106(gcm(aes))" , |
1595 | .cra_driver_name = "rfc4106-gcm-aes-caam-qi" , |
1596 | .cra_blocksize = 1, |
1597 | }, |
1598 | .setkey = rfc4106_setkey, |
1599 | .setauthsize = rfc4106_setauthsize, |
1600 | .encrypt = ipsec_gcm_encrypt, |
1601 | .decrypt = ipsec_gcm_decrypt, |
1602 | .ivsize = 8, |
1603 | .maxauthsize = AES_BLOCK_SIZE, |
1604 | }, |
1605 | .caam = { |
1606 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
1607 | .nodkp = true, |
1608 | }, |
1609 | }, |
1610 | { |
1611 | .aead = { |
1612 | .base = { |
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
1615 | .cra_blocksize = 1, |
1616 | }, |
1617 | .setkey = rfc4543_setkey, |
1618 | .setauthsize = rfc4543_setauthsize, |
1619 | .encrypt = ipsec_gcm_encrypt, |
1620 | .decrypt = ipsec_gcm_decrypt, |
1621 | .ivsize = 8, |
1622 | .maxauthsize = AES_BLOCK_SIZE, |
1623 | }, |
1624 | .caam = { |
1625 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
1626 | .nodkp = true, |
1627 | }, |
1628 | }, |
1629 | /* Galois Counter Mode */ |
1630 | { |
1631 | .aead = { |
1632 | .base = { |
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
1635 | .cra_blocksize = 1, |
1636 | }, |
1637 | .setkey = gcm_setkey, |
1638 | .setauthsize = gcm_setauthsize, |
1639 | .encrypt = aead_encrypt, |
1640 | .decrypt = aead_decrypt, |
1641 | .ivsize = 12, |
1642 | .maxauthsize = AES_BLOCK_SIZE, |
1643 | }, |
1644 | .caam = { |
1645 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
1646 | .nodkp = true, |
1647 | } |
1648 | }, |
1649 | /* single-pass ipsec_esp descriptor */ |
1650 | { |
1651 | .aead = { |
1652 | .base = { |
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
1656 | .cra_blocksize = AES_BLOCK_SIZE, |
1657 | }, |
1658 | .setkey = aead_setkey, |
1659 | .setauthsize = aead_setauthsize, |
1660 | .encrypt = aead_encrypt, |
1661 | .decrypt = aead_decrypt, |
1662 | .ivsize = AES_BLOCK_SIZE, |
1663 | .maxauthsize = MD5_DIGEST_SIZE, |
1664 | }, |
1665 | .caam = { |
1666 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1667 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
1668 | OP_ALG_AAI_HMAC_PRECOMP, |
1669 | } |
1670 | }, |
1671 | { |
1672 | .aead = { |
1673 | .base = { |
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
1678 | .cra_blocksize = AES_BLOCK_SIZE, |
1679 | }, |
1680 | .setkey = aead_setkey, |
1681 | .setauthsize = aead_setauthsize, |
1682 | .encrypt = aead_encrypt, |
1683 | .decrypt = aead_decrypt, |
1684 | .ivsize = AES_BLOCK_SIZE, |
1685 | .maxauthsize = MD5_DIGEST_SIZE, |
1686 | }, |
1687 | .caam = { |
1688 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1689 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
1690 | OP_ALG_AAI_HMAC_PRECOMP, |
1691 | .geniv = true, |
1692 | } |
1693 | }, |
1694 | { |
1695 | .aead = { |
1696 | .base = { |
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
1700 | .cra_blocksize = AES_BLOCK_SIZE, |
1701 | }, |
1702 | .setkey = aead_setkey, |
1703 | .setauthsize = aead_setauthsize, |
1704 | .encrypt = aead_encrypt, |
1705 | .decrypt = aead_decrypt, |
1706 | .ivsize = AES_BLOCK_SIZE, |
1707 | .maxauthsize = SHA1_DIGEST_SIZE, |
1708 | }, |
1709 | .caam = { |
1710 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1711 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
1712 | OP_ALG_AAI_HMAC_PRECOMP, |
1713 | } |
1714 | }, |
1715 | { |
1716 | .aead = { |
1717 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
1722 | .cra_blocksize = AES_BLOCK_SIZE, |
1723 | }, |
1724 | .setkey = aead_setkey, |
1725 | .setauthsize = aead_setauthsize, |
1726 | .encrypt = aead_encrypt, |
1727 | .decrypt = aead_decrypt, |
1728 | .ivsize = AES_BLOCK_SIZE, |
1729 | .maxauthsize = SHA1_DIGEST_SIZE, |
1730 | }, |
1731 | .caam = { |
1732 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1733 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
1734 | OP_ALG_AAI_HMAC_PRECOMP, |
1735 | .geniv = true, |
1736 | }, |
1737 | }, |
1738 | { |
1739 | .aead = { |
1740 | .base = { |
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
1744 | .cra_blocksize = AES_BLOCK_SIZE, |
1745 | }, |
1746 | .setkey = aead_setkey, |
1747 | .setauthsize = aead_setauthsize, |
1748 | .encrypt = aead_encrypt, |
1749 | .decrypt = aead_decrypt, |
1750 | .ivsize = AES_BLOCK_SIZE, |
1751 | .maxauthsize = SHA224_DIGEST_SIZE, |
1752 | }, |
1753 | .caam = { |
1754 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1755 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
1756 | OP_ALG_AAI_HMAC_PRECOMP, |
1757 | } |
1758 | }, |
1759 | { |
1760 | .aead = { |
1761 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
1766 | .cra_blocksize = AES_BLOCK_SIZE, |
1767 | }, |
1768 | .setkey = aead_setkey, |
1769 | .setauthsize = aead_setauthsize, |
1770 | .encrypt = aead_encrypt, |
1771 | .decrypt = aead_decrypt, |
1772 | .ivsize = AES_BLOCK_SIZE, |
1773 | .maxauthsize = SHA224_DIGEST_SIZE, |
1774 | }, |
1775 | .caam = { |
1776 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1777 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
1778 | OP_ALG_AAI_HMAC_PRECOMP, |
1779 | .geniv = true, |
1780 | } |
1781 | }, |
1782 | { |
1783 | .aead = { |
1784 | .base = { |
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
1788 | .cra_blocksize = AES_BLOCK_SIZE, |
1789 | }, |
1790 | .setkey = aead_setkey, |
1791 | .setauthsize = aead_setauthsize, |
1792 | .encrypt = aead_encrypt, |
1793 | .decrypt = aead_decrypt, |
1794 | .ivsize = AES_BLOCK_SIZE, |
1795 | .maxauthsize = SHA256_DIGEST_SIZE, |
1796 | }, |
1797 | .caam = { |
1798 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1799 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
1800 | OP_ALG_AAI_HMAC_PRECOMP, |
1801 | } |
1802 | }, |
1803 | { |
1804 | .aead = { |
1805 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
1811 | .cra_blocksize = AES_BLOCK_SIZE, |
1812 | }, |
1813 | .setkey = aead_setkey, |
1814 | .setauthsize = aead_setauthsize, |
1815 | .encrypt = aead_encrypt, |
1816 | .decrypt = aead_decrypt, |
1817 | .ivsize = AES_BLOCK_SIZE, |
1818 | .maxauthsize = SHA256_DIGEST_SIZE, |
1819 | }, |
1820 | .caam = { |
1821 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1822 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
1823 | OP_ALG_AAI_HMAC_PRECOMP, |
1824 | .geniv = true, |
1825 | } |
1826 | }, |
1827 | { |
1828 | .aead = { |
1829 | .base = { |
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
1833 | .cra_blocksize = AES_BLOCK_SIZE, |
1834 | }, |
1835 | .setkey = aead_setkey, |
1836 | .setauthsize = aead_setauthsize, |
1837 | .encrypt = aead_encrypt, |
1838 | .decrypt = aead_decrypt, |
1839 | .ivsize = AES_BLOCK_SIZE, |
1840 | .maxauthsize = SHA384_DIGEST_SIZE, |
1841 | }, |
1842 | .caam = { |
1843 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1844 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
1845 | OP_ALG_AAI_HMAC_PRECOMP, |
1846 | } |
1847 | }, |
1848 | { |
1849 | .aead = { |
1850 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
1856 | .cra_blocksize = AES_BLOCK_SIZE, |
1857 | }, |
1858 | .setkey = aead_setkey, |
1859 | .setauthsize = aead_setauthsize, |
1860 | .encrypt = aead_encrypt, |
1861 | .decrypt = aead_decrypt, |
1862 | .ivsize = AES_BLOCK_SIZE, |
1863 | .maxauthsize = SHA384_DIGEST_SIZE, |
1864 | }, |
1865 | .caam = { |
1866 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1867 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
1868 | OP_ALG_AAI_HMAC_PRECOMP, |
1869 | .geniv = true, |
1870 | } |
1871 | }, |
1872 | { |
1873 | .aead = { |
1874 | .base = { |
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
1878 | .cra_blocksize = AES_BLOCK_SIZE, |
1879 | }, |
1880 | .setkey = aead_setkey, |
1881 | .setauthsize = aead_setauthsize, |
1882 | .encrypt = aead_encrypt, |
1883 | .decrypt = aead_decrypt, |
1884 | .ivsize = AES_BLOCK_SIZE, |
1885 | .maxauthsize = SHA512_DIGEST_SIZE, |
1886 | }, |
1887 | .caam = { |
1888 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1889 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
1890 | OP_ALG_AAI_HMAC_PRECOMP, |
1891 | } |
1892 | }, |
1893 | { |
1894 | .aead = { |
1895 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
1901 | .cra_blocksize = AES_BLOCK_SIZE, |
1902 | }, |
1903 | .setkey = aead_setkey, |
1904 | .setauthsize = aead_setauthsize, |
1905 | .encrypt = aead_encrypt, |
1906 | .decrypt = aead_decrypt, |
1907 | .ivsize = AES_BLOCK_SIZE, |
1908 | .maxauthsize = SHA512_DIGEST_SIZE, |
1909 | }, |
1910 | .caam = { |
1911 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1912 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
1913 | OP_ALG_AAI_HMAC_PRECOMP, |
1914 | .geniv = true, |
1915 | } |
1916 | }, |
1917 | { |
1918 | .aead = { |
1919 | .base = { |
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
1923 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1924 | }, |
1925 | .setkey = des3_aead_setkey, |
1926 | .setauthsize = aead_setauthsize, |
1927 | .encrypt = aead_encrypt, |
1928 | .decrypt = aead_decrypt, |
1929 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1930 | .maxauthsize = MD5_DIGEST_SIZE, |
1931 | }, |
1932 | .caam = { |
1933 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
1934 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
1935 | OP_ALG_AAI_HMAC_PRECOMP, |
1936 | } |
1937 | }, |
1938 | { |
1939 | .aead = { |
1940 | .base = { |
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
1945 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1946 | }, |
1947 | .setkey = des3_aead_setkey, |
1948 | .setauthsize = aead_setauthsize, |
1949 | .encrypt = aead_encrypt, |
1950 | .decrypt = aead_decrypt, |
1951 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1952 | .maxauthsize = MD5_DIGEST_SIZE, |
1953 | }, |
1954 | .caam = { |
1955 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
1956 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
1957 | OP_ALG_AAI_HMAC_PRECOMP, |
1958 | .geniv = true, |
1959 | } |
1960 | }, |
1961 | { |
1962 | .aead = { |
1963 | .base = { |
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
1968 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1969 | }, |
1970 | .setkey = des3_aead_setkey, |
1971 | .setauthsize = aead_setauthsize, |
1972 | .encrypt = aead_encrypt, |
1973 | .decrypt = aead_decrypt, |
1974 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1975 | .maxauthsize = SHA1_DIGEST_SIZE, |
1976 | }, |
1977 | .caam = { |
1978 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
1979 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
1980 | OP_ALG_AAI_HMAC_PRECOMP, |
1981 | }, |
1982 | }, |
1983 | { |
1984 | .aead = { |
1985 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
1991 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1992 | }, |
1993 | .setkey = des3_aead_setkey, |
1994 | .setauthsize = aead_setauthsize, |
1995 | .encrypt = aead_encrypt, |
1996 | .decrypt = aead_decrypt, |
1997 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1998 | .maxauthsize = SHA1_DIGEST_SIZE, |
1999 | }, |
2000 | .caam = { |
2001 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2002 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2003 | OP_ALG_AAI_HMAC_PRECOMP, |
2004 | .geniv = true, |
2005 | } |
2006 | }, |
2007 | { |
2008 | .aead = { |
2009 | .base = { |
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
2014 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2015 | }, |
2016 | .setkey = des3_aead_setkey, |
2017 | .setauthsize = aead_setauthsize, |
2018 | .encrypt = aead_encrypt, |
2019 | .decrypt = aead_decrypt, |
2020 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2021 | .maxauthsize = SHA224_DIGEST_SIZE, |
2022 | }, |
2023 | .caam = { |
2024 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2025 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2026 | OP_ALG_AAI_HMAC_PRECOMP, |
2027 | }, |
2028 | }, |
2029 | { |
2030 | .aead = { |
2031 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
2037 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2038 | }, |
2039 | .setkey = des3_aead_setkey, |
2040 | .setauthsize = aead_setauthsize, |
2041 | .encrypt = aead_encrypt, |
2042 | .decrypt = aead_decrypt, |
2043 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2044 | .maxauthsize = SHA224_DIGEST_SIZE, |
2045 | }, |
2046 | .caam = { |
2047 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2048 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2049 | OP_ALG_AAI_HMAC_PRECOMP, |
2050 | .geniv = true, |
2051 | } |
2052 | }, |
2053 | { |
2054 | .aead = { |
2055 | .base = { |
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
2060 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2061 | }, |
2062 | .setkey = des3_aead_setkey, |
2063 | .setauthsize = aead_setauthsize, |
2064 | .encrypt = aead_encrypt, |
2065 | .decrypt = aead_decrypt, |
2066 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2067 | .maxauthsize = SHA256_DIGEST_SIZE, |
2068 | }, |
2069 | .caam = { |
2070 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2071 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2072 | OP_ALG_AAI_HMAC_PRECOMP, |
2073 | }, |
2074 | }, |
2075 | { |
2076 | .aead = { |
2077 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
2083 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2084 | }, |
2085 | .setkey = des3_aead_setkey, |
2086 | .setauthsize = aead_setauthsize, |
2087 | .encrypt = aead_encrypt, |
2088 | .decrypt = aead_decrypt, |
2089 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2090 | .maxauthsize = SHA256_DIGEST_SIZE, |
2091 | }, |
2092 | .caam = { |
2093 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2094 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2095 | OP_ALG_AAI_HMAC_PRECOMP, |
2096 | .geniv = true, |
2097 | } |
2098 | }, |
2099 | { |
2100 | .aead = { |
2101 | .base = { |
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
2106 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2107 | }, |
2108 | .setkey = des3_aead_setkey, |
2109 | .setauthsize = aead_setauthsize, |
2110 | .encrypt = aead_encrypt, |
2111 | .decrypt = aead_decrypt, |
2112 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2113 | .maxauthsize = SHA384_DIGEST_SIZE, |
2114 | }, |
2115 | .caam = { |
2116 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2117 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2118 | OP_ALG_AAI_HMAC_PRECOMP, |
2119 | }, |
2120 | }, |
2121 | { |
2122 | .aead = { |
2123 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
2129 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2130 | }, |
2131 | .setkey = des3_aead_setkey, |
2132 | .setauthsize = aead_setauthsize, |
2133 | .encrypt = aead_encrypt, |
2134 | .decrypt = aead_decrypt, |
2135 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2136 | .maxauthsize = SHA384_DIGEST_SIZE, |
2137 | }, |
2138 | .caam = { |
2139 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2140 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2141 | OP_ALG_AAI_HMAC_PRECOMP, |
2142 | .geniv = true, |
2143 | } |
2144 | }, |
2145 | { |
2146 | .aead = { |
2147 | .base = { |
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
2152 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2153 | }, |
2154 | .setkey = des3_aead_setkey, |
2155 | .setauthsize = aead_setauthsize, |
2156 | .encrypt = aead_encrypt, |
2157 | .decrypt = aead_decrypt, |
2158 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2159 | .maxauthsize = SHA512_DIGEST_SIZE, |
2160 | }, |
2161 | .caam = { |
2162 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2163 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2164 | OP_ALG_AAI_HMAC_PRECOMP, |
2165 | }, |
2166 | }, |
2167 | { |
2168 | .aead = { |
2169 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
2175 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2176 | }, |
2177 | .setkey = des3_aead_setkey, |
2178 | .setauthsize = aead_setauthsize, |
2179 | .encrypt = aead_encrypt, |
2180 | .decrypt = aead_decrypt, |
2181 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2182 | .maxauthsize = SHA512_DIGEST_SIZE, |
2183 | }, |
2184 | .caam = { |
2185 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2186 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2187 | OP_ALG_AAI_HMAC_PRECOMP, |
2188 | .geniv = true, |
2189 | } |
2190 | }, |
2191 | { |
2192 | .aead = { |
2193 | .base = { |
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
2197 | .cra_blocksize = DES_BLOCK_SIZE, |
2198 | }, |
2199 | .setkey = aead_setkey, |
2200 | .setauthsize = aead_setauthsize, |
2201 | .encrypt = aead_encrypt, |
2202 | .decrypt = aead_decrypt, |
2203 | .ivsize = DES_BLOCK_SIZE, |
2204 | .maxauthsize = MD5_DIGEST_SIZE, |
2205 | }, |
2206 | .caam = { |
2207 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2208 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2209 | OP_ALG_AAI_HMAC_PRECOMP, |
2210 | }, |
2211 | }, |
2212 | { |
2213 | .aead = { |
2214 | .base = { |
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
2219 | .cra_blocksize = DES_BLOCK_SIZE, |
2220 | }, |
2221 | .setkey = aead_setkey, |
2222 | .setauthsize = aead_setauthsize, |
2223 | .encrypt = aead_encrypt, |
2224 | .decrypt = aead_decrypt, |
2225 | .ivsize = DES_BLOCK_SIZE, |
2226 | .maxauthsize = MD5_DIGEST_SIZE, |
2227 | }, |
2228 | .caam = { |
2229 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2230 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2231 | OP_ALG_AAI_HMAC_PRECOMP, |
2232 | .geniv = true, |
2233 | } |
2234 | }, |
2235 | { |
2236 | .aead = { |
2237 | .base = { |
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
2241 | .cra_blocksize = DES_BLOCK_SIZE, |
2242 | }, |
2243 | .setkey = aead_setkey, |
2244 | .setauthsize = aead_setauthsize, |
2245 | .encrypt = aead_encrypt, |
2246 | .decrypt = aead_decrypt, |
2247 | .ivsize = DES_BLOCK_SIZE, |
2248 | .maxauthsize = SHA1_DIGEST_SIZE, |
2249 | }, |
2250 | .caam = { |
2251 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2252 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2253 | OP_ALG_AAI_HMAC_PRECOMP, |
2254 | }, |
2255 | }, |
2256 | { |
2257 | .aead = { |
2258 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
2263 | .cra_blocksize = DES_BLOCK_SIZE, |
2264 | }, |
2265 | .setkey = aead_setkey, |
2266 | .setauthsize = aead_setauthsize, |
2267 | .encrypt = aead_encrypt, |
2268 | .decrypt = aead_decrypt, |
2269 | .ivsize = DES_BLOCK_SIZE, |
2270 | .maxauthsize = SHA1_DIGEST_SIZE, |
2271 | }, |
2272 | .caam = { |
2273 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2274 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2275 | OP_ALG_AAI_HMAC_PRECOMP, |
2276 | .geniv = true, |
2277 | } |
2278 | }, |
2279 | { |
2280 | .aead = { |
2281 | .base = { |
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
2285 | .cra_blocksize = DES_BLOCK_SIZE, |
2286 | }, |
2287 | .setkey = aead_setkey, |
2288 | .setauthsize = aead_setauthsize, |
2289 | .encrypt = aead_encrypt, |
2290 | .decrypt = aead_decrypt, |
2291 | .ivsize = DES_BLOCK_SIZE, |
2292 | .maxauthsize = SHA224_DIGEST_SIZE, |
2293 | }, |
2294 | .caam = { |
2295 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2296 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2297 | OP_ALG_AAI_HMAC_PRECOMP, |
2298 | }, |
2299 | }, |
2300 | { |
2301 | .aead = { |
2302 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
2308 | .cra_blocksize = DES_BLOCK_SIZE, |
2309 | }, |
2310 | .setkey = aead_setkey, |
2311 | .setauthsize = aead_setauthsize, |
2312 | .encrypt = aead_encrypt, |
2313 | .decrypt = aead_decrypt, |
2314 | .ivsize = DES_BLOCK_SIZE, |
2315 | .maxauthsize = SHA224_DIGEST_SIZE, |
2316 | }, |
2317 | .caam = { |
2318 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2319 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2320 | OP_ALG_AAI_HMAC_PRECOMP, |
2321 | .geniv = true, |
2322 | } |
2323 | }, |
2324 | { |
2325 | .aead = { |
2326 | .base = { |
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
2330 | .cra_blocksize = DES_BLOCK_SIZE, |
2331 | }, |
2332 | .setkey = aead_setkey, |
2333 | .setauthsize = aead_setauthsize, |
2334 | .encrypt = aead_encrypt, |
2335 | .decrypt = aead_decrypt, |
2336 | .ivsize = DES_BLOCK_SIZE, |
2337 | .maxauthsize = SHA256_DIGEST_SIZE, |
2338 | }, |
2339 | .caam = { |
2340 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2341 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2342 | OP_ALG_AAI_HMAC_PRECOMP, |
2343 | }, |
2344 | }, |
2345 | { |
2346 | .aead = { |
2347 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
2353 | .cra_blocksize = DES_BLOCK_SIZE, |
2354 | }, |
2355 | .setkey = aead_setkey, |
2356 | .setauthsize = aead_setauthsize, |
2357 | .encrypt = aead_encrypt, |
2358 | .decrypt = aead_decrypt, |
2359 | .ivsize = DES_BLOCK_SIZE, |
2360 | .maxauthsize = SHA256_DIGEST_SIZE, |
2361 | }, |
2362 | .caam = { |
2363 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2364 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2365 | OP_ALG_AAI_HMAC_PRECOMP, |
2366 | .geniv = true, |
2367 | }, |
2368 | }, |
2369 | { |
2370 | .aead = { |
2371 | .base = { |
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
2375 | .cra_blocksize = DES_BLOCK_SIZE, |
2376 | }, |
2377 | .setkey = aead_setkey, |
2378 | .setauthsize = aead_setauthsize, |
2379 | .encrypt = aead_encrypt, |
2380 | .decrypt = aead_decrypt, |
2381 | .ivsize = DES_BLOCK_SIZE, |
2382 | .maxauthsize = SHA384_DIGEST_SIZE, |
2383 | }, |
2384 | .caam = { |
2385 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2386 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2387 | OP_ALG_AAI_HMAC_PRECOMP, |
2388 | }, |
2389 | }, |
2390 | { |
2391 | .aead = { |
2392 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
2398 | .cra_blocksize = DES_BLOCK_SIZE, |
2399 | }, |
2400 | .setkey = aead_setkey, |
2401 | .setauthsize = aead_setauthsize, |
2402 | .encrypt = aead_encrypt, |
2403 | .decrypt = aead_decrypt, |
2404 | .ivsize = DES_BLOCK_SIZE, |
2405 | .maxauthsize = SHA384_DIGEST_SIZE, |
2406 | }, |
2407 | .caam = { |
2408 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2409 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2410 | OP_ALG_AAI_HMAC_PRECOMP, |
2411 | .geniv = true, |
2412 | } |
2413 | }, |
2414 | { |
2415 | .aead = { |
2416 | .base = { |
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
2420 | .cra_blocksize = DES_BLOCK_SIZE, |
2421 | }, |
2422 | .setkey = aead_setkey, |
2423 | .setauthsize = aead_setauthsize, |
2424 | .encrypt = aead_encrypt, |
2425 | .decrypt = aead_decrypt, |
2426 | .ivsize = DES_BLOCK_SIZE, |
2427 | .maxauthsize = SHA512_DIGEST_SIZE, |
2428 | }, |
2429 | .caam = { |
2430 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2431 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2432 | OP_ALG_AAI_HMAC_PRECOMP, |
2433 | } |
2434 | }, |
2435 | { |
2436 | .aead = { |
2437 | .base = { |
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
2443 | .cra_blocksize = DES_BLOCK_SIZE, |
2444 | }, |
2445 | .setkey = aead_setkey, |
2446 | .setauthsize = aead_setauthsize, |
2447 | .encrypt = aead_encrypt, |
2448 | .decrypt = aead_decrypt, |
2449 | .ivsize = DES_BLOCK_SIZE, |
2450 | .maxauthsize = SHA512_DIGEST_SIZE, |
2451 | }, |
2452 | .caam = { |
2453 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2454 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2455 | OP_ALG_AAI_HMAC_PRECOMP, |
2456 | .geniv = true, |
2457 | } |
2458 | }, |
2459 | }; |
2460 | |
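/*
 * Common tfm setup: allocate a job ring for the session, choose the DMA
 * direction for the key buffer (bidirectional on Era >= 6 when the shared
 * descriptor rewrites the key in place via DKP), map the key and seed the
 * class 1/2 descriptor header templates.
 */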
2461 | static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, |
2462 | bool uses_dkp) |
2463 | { |
2464 | struct caam_drv_private *priv; |
2465 | struct device *dev; |
2466 | |
2467 | /* |
2468 | * distribute tfms across job rings to ensure in-order |
2469 | * crypto request processing per tfm |
2470 | */ |
2471 | ctx->jrdev = caam_jr_alloc(); |
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
2475 | } |
2476 | |
2477 | dev = ctx->jrdev->parent; |
2478 | priv = dev_get_drvdata(dev); |
2479 | if (priv->era >= 6 && uses_dkp) |
2480 | ctx->dir = DMA_BIDIRECTIONAL; |
2481 | else |
2482 | ctx->dir = DMA_TO_DEVICE; |
2483 | |
2484 | ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), |
2485 | ctx->dir); |
	if (dma_mapping_error(dev, ctx->key_dma)) {
		dev_err(dev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
2489 | return -ENOMEM; |
2490 | } |
2491 | |
2492 | /* copy descriptor header template value */ |
2493 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
2494 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
2495 | |
2496 | ctx->qidev = dev; |
2497 | |
2498 | spin_lock_init(&ctx->lock); |
2499 | ctx->drv_ctx[ENCRYPT] = NULL; |
2500 | ctx->drv_ctx[DECRYPT] = NULL; |
2501 | |
2502 | return 0; |
2503 | } |
2504 | |
2505 | static int caam_cra_init(struct crypto_skcipher *tfm) |
2506 | { |
2507 | struct skcipher_alg *alg = crypto_skcipher_alg(tfm); |
2508 | struct caam_skcipher_alg *caam_alg = |
2509 | container_of(alg, typeof(*caam_alg), skcipher); |
2510 | struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); |
2511 | u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; |
2512 | int ret = 0; |
2513 | |
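	/*
	 * Only XTS is registered with CRYPTO_ALG_NEED_FALLBACK: allocate a
	 * (likely software) skcipher with the same cra_name so requests the
	 * accelerator cannot handle - e.g. unsupported XTS key lengths - can
	 * be rerouted, and reserve room for the fallback's request context.
	 */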
2514 | if (alg_aai == OP_ALG_AAI_XTS) { |
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
2516 | struct crypto_skcipher *fallback; |
2517 | |
		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			pr_err("Failed to allocate %s fallback: %ld\n",
			       tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
2524 | } |
2525 | |
2526 | ctx->fallback = fallback; |
		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
					    crypto_skcipher_reqsize(fallback));
2529 | } |
2530 | |
	ret = caam_init_common(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
2534 | |
2535 | return ret; |
2536 | } |
2537 | |
2538 | static int caam_aead_init(struct crypto_aead *tfm) |
2539 | { |
2540 | struct aead_alg *alg = crypto_aead_alg(tfm); |
2541 | struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), |
2542 | aead); |
2543 | struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); |
2544 | |
	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
2546 | } |
2547 | |
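/*
 * Common tfm teardown: drop the per-direction driver contexts (if any were
 * created), unmap the key buffer and hand the job ring back.
 */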
2548 | static void caam_exit_common(struct caam_ctx *ctx) |
2549 | { |
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2552 | |
2553 | dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), |
2554 | ctx->dir); |
2555 | |
	caam_jr_free(ctx->jrdev);
2557 | } |
2558 | |
2559 | static void caam_cra_exit(struct crypto_skcipher *tfm) |
2560 | { |
2561 | struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); |
2562 | |
2563 | if (ctx->fallback) |
		crypto_free_skcipher(ctx->fallback);
2565 | caam_exit_common(ctx); |
2566 | } |
2567 | |
2568 | static void caam_aead_exit(struct crypto_aead *tfm) |
2569 | { |
	caam_exit_common(crypto_aead_ctx_dma(tfm));
2571 | } |
2572 | |
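/* Unregister everything that caam_qi_algapi_init() managed to register. */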
2573 | void caam_qi_algapi_exit(void) |
2574 | { |
2575 | int i; |
2576 | |
2577 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { |
2578 | struct caam_aead_alg *t_alg = driver_aeads + i; |
2579 | |
2580 | if (t_alg->registered) |
			crypto_unregister_aead(&t_alg->aead);
2582 | } |
2583 | |
2584 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
2585 | struct caam_skcipher_alg *t_alg = driver_algs + i; |
2586 | |
2587 | if (t_alg->registered) |
			crypto_unregister_skcipher(&t_alg->skcipher);
2589 | } |
2590 | } |
2591 | |
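/*
 * Fill in the fields shared by all skcipher entries before registration:
 * module owner, priority, DMA-padded context size and the standard flags
 * (async, may allocate memory, kernel-driver-only). Note the flags are
 * OR-ed in so per-entry flags such as CRYPTO_ALG_NEED_FALLBACK survive.
 */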
2592 | static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) |
2593 | { |
2594 | struct skcipher_alg *alg = &t_alg->skcipher; |
2595 | |
2596 | alg->base.cra_module = THIS_MODULE; |
2597 | alg->base.cra_priority = CAAM_CRA_PRIORITY; |
2598 | alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); |
2599 | alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | |
2600 | CRYPTO_ALG_KERN_DRIVER_ONLY); |
2601 | |
2602 | alg->init = caam_cra_init; |
2603 | alg->exit = caam_cra_exit; |
2604 | } |
2605 | |
2606 | static void caam_aead_alg_init(struct caam_aead_alg *t_alg) |
2607 | { |
2608 | struct aead_alg *alg = &t_alg->aead; |
2609 | |
2610 | alg->base.cra_module = THIS_MODULE; |
2611 | alg->base.cra_priority = CAAM_CRA_PRIORITY; |
2612 | alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); |
2613 | alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | |
2614 | CRYPTO_ALG_KERN_DRIVER_ONLY; |
2615 | |
2616 | alg->init = caam_aead_init; |
2617 | alg->exit = caam_aead_exit; |
2618 | } |
2619 | |
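/*
 * Probe the accelerator and register whatever it supports. Afterwards the
 * algorithms are reached through the regular crypto API; a minimal sketch
 * of a caller (hypothetical user code, not part of this driver):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		pr_info("ctr(aes) backed by %s\n",
 *			crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));
 *
 * CAAM_CRA_PRIORITY makes these implementations win over generic ones.
 */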
2620 | int caam_qi_algapi_init(struct device *ctrldev) |
2621 | { |
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
2623 | int i = 0, err = 0; |
2624 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; |
2625 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
2626 | bool registered = false; |
2627 | |
2628 | /* Make sure this runs only on (DPAA 1.x) QI */ |
2629 | if (!priv->qi_present || caam_dpaa2) |
2630 | return 0; |
2631 | |
2632 | /* |
2633 | * Register crypto algorithms the device supports. |
2634 | * First, detect presence and attributes of DES, AES, and MD blocks. |
2635 | */ |
2636 | if (priv->era < 10) { |
2637 | u32 cha_vid, cha_inst; |
2638 | |
		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2640 | aes_vid = cha_vid & CHA_ID_LS_AES_MASK; |
2641 | md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; |
2642 | |
		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2644 | des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> |
2645 | CHA_ID_LS_DES_SHIFT; |
2646 | aes_inst = cha_inst & CHA_ID_LS_AES_MASK; |
2647 | md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; |
2648 | } else { |
2649 | u32 aesa, mdha; |
2650 | |
		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2653 | |
2654 | aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; |
2655 | md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; |
2656 | |
		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2658 | aes_inst = aesa & CHA_VER_NUM_MASK; |
2659 | md_inst = mdha & CHA_VER_NUM_MASK; |
2660 | } |
2661 | |
2662 | /* If MD is present, limit digest size based on LP256 */ |
2663 | if (md_inst && md_vid == CHA_VER_VID_MD_LP256) |
2664 | md_limit = SHA256_DIGEST_SIZE; |
2665 | |
2666 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
2667 | struct caam_skcipher_alg *t_alg = driver_algs + i; |
2668 | u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; |
2669 | |
2670 | /* Skip DES algorithms if not supported by device */ |
2671 | if (!des_inst && |
2672 | ((alg_sel == OP_ALG_ALGSEL_3DES) || |
2673 | (alg_sel == OP_ALG_ALGSEL_DES))) |
2674 | continue; |
2675 | |
2676 | /* Skip AES algorithms if not supported by device */ |
2677 | if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) |
2678 | continue; |
2679 | |
2680 | caam_skcipher_alg_init(t_alg); |
2681 | |
		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
2686 | continue; |
2687 | } |
2688 | |
2689 | t_alg->registered = true; |
2690 | registered = true; |
2691 | } |
2692 | |
2693 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { |
2694 | struct caam_aead_alg *t_alg = driver_aeads + i; |
2695 | u32 c1_alg_sel = t_alg->caam.class1_alg_type & |
2696 | OP_ALG_ALGSEL_MASK; |
2697 | u32 c2_alg_sel = t_alg->caam.class2_alg_type & |
2698 | OP_ALG_ALGSEL_MASK; |
2699 | u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; |
2700 | |
2701 | /* Skip DES algorithms if not supported by device */ |
2702 | if (!des_inst && |
2703 | ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || |
2704 | (c1_alg_sel == OP_ALG_ALGSEL_DES))) |
2705 | continue; |
2706 | |
2707 | /* Skip AES algorithms if not supported by device */ |
2708 | if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) |
2709 | continue; |
2710 | |
2711 | /* |
2712 | * Check support for AES algorithms not available |
2713 | * on LP devices. |
2714 | */ |
2715 | if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) |
2716 | continue; |
2717 | |
2718 | /* |
2719 | * Skip algorithms requiring message digests |
2720 | * if MD or MD size is not supported by device. |
2721 | */ |
2722 | if (c2_alg_sel && |
2723 | (!md_inst || (t_alg->aead.maxauthsize > md_limit))) |
2724 | continue; |
2725 | |
2726 | caam_aead_alg_init(t_alg); |
2727 | |
		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
2732 | continue; |
2733 | } |
2734 | |
2735 | t_alg->registered = true; |
2736 | registered = true; |
2737 | } |
2738 | |
2739 | if (registered) |
		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
2741 | |
2742 | return err; |
2743 | } |
2744 | |