// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa2-eth driver, but it
 *       would pose a problem for userspace application processing, which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *		      to unsupported xts key lengths
 * @fallback: xts fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}

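/*
 * Map a generic crypto_async_request back to this driver's per-request
 * context, based on the type of the underlying transform.
 */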
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx_dma(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx_dma(
			container_of(areq, struct aead_request, base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx_dma(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

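/*
 * Undo all DMA mappings set up for a request: the source/destination
 * scatterlists (a single bidirectional mapping when they overlap), plus
 * the IV buffer and the hardware S/G table, when present.
 */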
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

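/*
 * (Re)generate the AEAD encrypt and decrypt shared descriptors and sync
 * them out to the device. Handles the CTR/RFC3686 IV placement in the
 * CONTEXT1 register and queries whether the split (authentication) and
 * encryption keys can be inlined into each descriptor.
 */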
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

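/*
 * Split an authenc key blob into its authentication and encryption parts,
 * store them back to back in ctx->key (auth key padded to the split key
 * length) and rebuild the shared descriptors.
 */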
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

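/*
 * Allocate an extended descriptor for an AEAD request and set up all the
 * DMA resources it needs: source/destination mappings, the IV and assoclen
 * buffers, the hardware S/G table and the input/output frame list entries.
 */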
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}

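/* (Re)generate the ChaCha20-Poly1305 shared descriptors for both directions. */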
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

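/*
 * (Re)generate the AES-GCM shared descriptors, inlining the key only when
 * job descriptor and shared descriptor fit in the 64-word descriptor
 * buffer; otherwise the key is referenced by its DMA address.
 */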
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

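/* Same scheme as gcm_set_sh_desc(), for the RFC4106 (GCM-ESP) variant. */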
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

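/*
 * Common skcipher setkey: the key is always inlined into the shared
 * descriptors, which are rebuilt here and synced out to the device.
 */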
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

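/*
 * XTS setkey: besides rebuilding the shared descriptors, this also
 * programs the software fallback tfm whenever the key length or the SEC
 * era requires taking the fallback path at request time.
 */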
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

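/*
 * Allocate an extended descriptor for an skcipher request: DMA-map the
 * source/destination and the IV, then build the input/output hardware
 * S/G tables ([IV, src][dst, IV]) and the frame list entries.
 */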
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

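/*
 * Completion callbacks: translate the hardware status into an errno,
 * tear down the request's DMA mappings and complete it towards the
 * crypto API.
 */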
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

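/*
 * Enqueue an AEAD encrypt job to the DPSECI object; on any outcome other
 * than -EINPROGRESS (or a backlogged -EBUSY) the extended descriptor is
 * unmapped and freed here instead of in the completion callback.
 */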
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_request *caam_req = aead_request_ctx_dma(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

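/*
 * Return true if the upper 64 bits of the XTS IV (sector index) are
 * non-zero; callers use this to route such requests to the software
 * fallback on SEC eras <= 8, where the hardware does not support them.
 */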
1468 | static inline bool xts_skcipher_ivsize(struct skcipher_request *req) |
1469 | { |
1470 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1471 | unsigned int ivsize = crypto_skcipher_ivsize(tfm: skcipher); |
1472 | |
1473 | return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); |
1474 | } |
1475 | |
1476 | static int skcipher_encrypt(struct skcipher_request *req) |
1477 | { |
1478 | struct skcipher_edesc *edesc; |
1479 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1480 | struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm: skcipher); |
1481 | struct caam_request *caam_req = skcipher_request_ctx_dma(req); |
1482 | struct dpaa2_caam_priv *priv = dev_get_drvdata(dev: ctx->dev); |
1483 | int ret; |
1484 | |
1485 | /* |
1486 | * XTS is expected to return an error even for input length = 0 |
1487 | * Note that the case input length < block size will be caught during |
1488 | * HW offloading and return an error. |
1489 | */ |
1490 | if (!req->cryptlen && !ctx->fallback) |
1491 | return 0; |
1492 | |
1493 | if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || |
1494 | ctx->xts_key_fallback)) { |
1495 | skcipher_request_set_tfm(req: &caam_req->fallback_req, tfm: ctx->fallback); |
1496 | skcipher_request_set_callback(req: &caam_req->fallback_req, |
1497 | flags: req->base.flags, |
1498 | compl: req->base.complete, |
1499 | data: req->base.data); |
1500 | skcipher_request_set_crypt(req: &caam_req->fallback_req, src: req->src, |
1501 | dst: req->dst, cryptlen: req->cryptlen, iv: req->iv); |
1502 | |
1503 | return crypto_skcipher_encrypt(req: &caam_req->fallback_req); |
1504 | } |
1505 | |
1506 | /* allocate extended descriptor */ |
1507 | edesc = skcipher_edesc_alloc(req); |
1508 | if (IS_ERR(ptr: edesc)) |
1509 | return PTR_ERR(ptr: edesc); |
1510 | |
1511 | caam_req->flc = &ctx->flc[ENCRYPT]; |
1512 | caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; |
1513 | caam_req->cbk = skcipher_encrypt_done; |
1514 | caam_req->ctx = &req->base; |
1515 | caam_req->edesc = edesc; |
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
1521 | } |
1522 | |
1523 | return ret; |
1524 | } |
1525 | |
1526 | static int skcipher_decrypt(struct skcipher_request *req) |
1527 | { |
1528 | struct skcipher_edesc *edesc; |
1529 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1533 | int ret; |
1534 | |
1535 | /* |
1536 | * XTS is expected to return an error even for input length = 0 |
1537 | * Note that the case input length < block size will be caught during |
1538 | * HW offloading and return an error. |
1539 | */ |
1540 | if (!req->cryptlen && !ctx->fallback) |
1541 | return 0; |
1542 | |
1543 | if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || |
1544 | ctx->xts_key_fallback)) { |
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
1554 | } |
1555 | |
1556 | /* allocate extended descriptor */ |
1557 | edesc = skcipher_edesc_alloc(req); |
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);
1560 | |
1561 | caam_req->flc = &ctx->flc[DECRYPT]; |
1562 | caam_req->flc_dma = ctx->flc_dma[DECRYPT]; |
1563 | caam_req->cbk = skcipher_decrypt_done; |
1564 | caam_req->ctx = &req->base; |
1565 | caam_req->edesc = edesc; |
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
1571 | } |
1572 | |
1573 | return ret; |
1574 | } |
1575 | |
1576 | static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam, |
1577 | bool uses_dkp) |
1578 | { |
1579 | dma_addr_t dma_addr; |
1580 | int i; |
1581 | |
1582 | /* copy descriptor header template value */ |
1583 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
1584 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
1585 | |
1586 | ctx->dev = caam->dev; |
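	/*
	 * DKP (Derived Key Protocol) overwrites the key buffer with the split
	 * key, hence the bidirectional mapping in that case.
	 */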
1587 | ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
1588 | |
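	/*
	 * flc[] and key[] sit contiguously at the start of struct caam_ctx,
	 * so a single mapping of offsetof(struct caam_ctx, flc_dma) bytes
	 * covers all flow contexts plus the key.
	 */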
	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1594 | return -ENOMEM; |
1595 | } |
1596 | |
1597 | for (i = 0; i < NUM_OP; i++) |
1598 | ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); |
1599 | ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]); |
1600 | |
1601 | return 0; |
1602 | } |
1603 | |
1604 | static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) |
1605 | { |
1606 | struct skcipher_alg *alg = crypto_skcipher_alg(tfm); |
1607 | struct caam_skcipher_alg *caam_alg = |
1608 | container_of(alg, typeof(*caam_alg), skcipher); |
1609 | struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); |
1610 | u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; |
1611 | int ret = 0; |
1612 | |
1613 | if (alg_aai == OP_ALG_AAI_XTS) { |
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			dev_err(caam_alg->caam.dev,
				"Failed to allocate %s fallback: %ld\n",
				tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		crypto_skcipher_set_reqsize_dma(
			tfm, sizeof(struct caam_request) +
			     crypto_skcipher_reqsize(fallback));
	} else {
		crypto_skcipher_set_reqsize_dma(tfm,
						sizeof(struct caam_request));
	}

	ret = caam_cra_init(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
1638 | |
1639 | return ret; |
1640 | } |
1641 | |
1642 | static int caam_cra_init_aead(struct crypto_aead *tfm) |
1643 | { |
1644 | struct aead_alg *alg = crypto_aead_alg(tfm); |
1645 | struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), |
1646 | aead); |
1647 | |
	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
1651 | } |
1652 | |
1653 | static void caam_exit_common(struct caam_ctx *ctx) |
1654 | { |
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
1658 | } |
1659 | |
1660 | static void caam_cra_exit(struct crypto_skcipher *tfm) |
1661 | { |
1662 | struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); |
1663 | |
1664 | if (ctx->fallback) |
		crypto_free_skcipher(ctx->fallback);
1666 | caam_exit_common(ctx); |
1667 | } |
1668 | |
1669 | static void caam_cra_exit_aead(struct crypto_aead *tfm) |
1670 | { |
	caam_exit_common(crypto_aead_ctx_dma(tfm));
1672 | } |
1673 | |
1674 | static struct caam_skcipher_alg driver_algs[] = { |
1675 | { |
1676 | .skcipher = { |
1677 | .base = { |
1678 | .cra_name = "cbc(aes)" , |
1679 | .cra_driver_name = "cbc-aes-caam-qi2" , |
1680 | .cra_blocksize = AES_BLOCK_SIZE, |
1681 | }, |
1682 | .setkey = aes_skcipher_setkey, |
1683 | .encrypt = skcipher_encrypt, |
1684 | .decrypt = skcipher_decrypt, |
1685 | .min_keysize = AES_MIN_KEY_SIZE, |
1686 | .max_keysize = AES_MAX_KEY_SIZE, |
1687 | .ivsize = AES_BLOCK_SIZE, |
1688 | }, |
1689 | .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1690 | }, |
1691 | { |
1692 | .skcipher = { |
1693 | .base = { |
1694 | .cra_name = "cbc(des3_ede)" , |
1695 | .cra_driver_name = "cbc-3des-caam-qi2" , |
1696 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1697 | }, |
1698 | .setkey = des3_skcipher_setkey, |
1699 | .encrypt = skcipher_encrypt, |
1700 | .decrypt = skcipher_decrypt, |
1701 | .min_keysize = DES3_EDE_KEY_SIZE, |
1702 | .max_keysize = DES3_EDE_KEY_SIZE, |
1703 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1704 | }, |
1705 | .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
1706 | }, |
1707 | { |
1708 | .skcipher = { |
1709 | .base = { |
1710 | .cra_name = "cbc(des)" , |
1711 | .cra_driver_name = "cbc-des-caam-qi2" , |
1712 | .cra_blocksize = DES_BLOCK_SIZE, |
1713 | }, |
1714 | .setkey = des_skcipher_setkey, |
1715 | .encrypt = skcipher_encrypt, |
1716 | .decrypt = skcipher_decrypt, |
1717 | .min_keysize = DES_KEY_SIZE, |
1718 | .max_keysize = DES_KEY_SIZE, |
1719 | .ivsize = DES_BLOCK_SIZE, |
1720 | }, |
1721 | .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
1722 | }, |
1723 | { |
1724 | .skcipher = { |
1725 | .base = { |
1726 | .cra_name = "ctr(aes)" , |
1727 | .cra_driver_name = "ctr-aes-caam-qi2" , |
1728 | .cra_blocksize = 1, |
1729 | }, |
1730 | .setkey = ctr_skcipher_setkey, |
1731 | .encrypt = skcipher_encrypt, |
1732 | .decrypt = skcipher_decrypt, |
1733 | .min_keysize = AES_MIN_KEY_SIZE, |
1734 | .max_keysize = AES_MAX_KEY_SIZE, |
1735 | .ivsize = AES_BLOCK_SIZE, |
1736 | .chunksize = AES_BLOCK_SIZE, |
1737 | }, |
1738 | .caam.class1_alg_type = OP_ALG_ALGSEL_AES | |
1739 | OP_ALG_AAI_CTR_MOD128, |
1740 | }, |
1741 | { |
1742 | .skcipher = { |
1743 | .base = { |
1744 | .cra_name = "rfc3686(ctr(aes))" , |
1745 | .cra_driver_name = "rfc3686-ctr-aes-caam-qi2" , |
1746 | .cra_blocksize = 1, |
1747 | }, |
1748 | .setkey = rfc3686_skcipher_setkey, |
1749 | .encrypt = skcipher_encrypt, |
1750 | .decrypt = skcipher_decrypt, |
1751 | .min_keysize = AES_MIN_KEY_SIZE + |
1752 | CTR_RFC3686_NONCE_SIZE, |
1753 | .max_keysize = AES_MAX_KEY_SIZE + |
1754 | CTR_RFC3686_NONCE_SIZE, |
1755 | .ivsize = CTR_RFC3686_IV_SIZE, |
1756 | .chunksize = AES_BLOCK_SIZE, |
1757 | }, |
1758 | .caam = { |
1759 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
1760 | OP_ALG_AAI_CTR_MOD128, |
1761 | .rfc3686 = true, |
1762 | }, |
1763 | }, |
1764 | { |
1765 | .skcipher = { |
1766 | .base = { |
1767 | .cra_name = "xts(aes)" , |
1768 | .cra_driver_name = "xts-aes-caam-qi2" , |
1769 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
1770 | .cra_blocksize = AES_BLOCK_SIZE, |
1771 | }, |
1772 | .setkey = xts_skcipher_setkey, |
1773 | .encrypt = skcipher_encrypt, |
1774 | .decrypt = skcipher_decrypt, |
1775 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
1776 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
1777 | .ivsize = AES_BLOCK_SIZE, |
1778 | }, |
1779 | .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, |
1780 | }, |
1781 | { |
1782 | .skcipher = { |
1783 | .base = { |
1784 | .cra_name = "chacha20" , |
1785 | .cra_driver_name = "chacha20-caam-qi2" , |
1786 | .cra_blocksize = 1, |
1787 | }, |
1788 | .setkey = chacha20_skcipher_setkey, |
1789 | .encrypt = skcipher_encrypt, |
1790 | .decrypt = skcipher_decrypt, |
1791 | .min_keysize = CHACHA_KEY_SIZE, |
1792 | .max_keysize = CHACHA_KEY_SIZE, |
1793 | .ivsize = CHACHA_IV_SIZE, |
1794 | }, |
1795 | .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20, |
1796 | }, |
1797 | }; |
1798 | |
1799 | static struct caam_aead_alg driver_aeads[] = { |
1800 | { |
1801 | .aead = { |
1802 | .base = { |
1803 | .cra_name = "rfc4106(gcm(aes))" , |
1804 | .cra_driver_name = "rfc4106-gcm-aes-caam-qi2" , |
1805 | .cra_blocksize = 1, |
1806 | }, |
1807 | .setkey = rfc4106_setkey, |
1808 | .setauthsize = rfc4106_setauthsize, |
1809 | .encrypt = ipsec_gcm_encrypt, |
1810 | .decrypt = ipsec_gcm_decrypt, |
1811 | .ivsize = 8, |
1812 | .maxauthsize = AES_BLOCK_SIZE, |
1813 | }, |
1814 | .caam = { |
1815 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
1816 | .nodkp = true, |
1817 | }, |
1818 | }, |
1819 | { |
1820 | .aead = { |
1821 | .base = { |
1822 | .cra_name = "rfc4543(gcm(aes))" , |
1823 | .cra_driver_name = "rfc4543-gcm-aes-caam-qi2" , |
1824 | .cra_blocksize = 1, |
1825 | }, |
1826 | .setkey = rfc4543_setkey, |
1827 | .setauthsize = rfc4543_setauthsize, |
1828 | .encrypt = ipsec_gcm_encrypt, |
1829 | .decrypt = ipsec_gcm_decrypt, |
1830 | .ivsize = 8, |
1831 | .maxauthsize = AES_BLOCK_SIZE, |
1832 | }, |
1833 | .caam = { |
1834 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
1835 | .nodkp = true, |
1836 | }, |
1837 | }, |
1838 | /* Galois Counter Mode */ |
1839 | { |
1840 | .aead = { |
1841 | .base = { |
1842 | .cra_name = "gcm(aes)" , |
1843 | .cra_driver_name = "gcm-aes-caam-qi2" , |
1844 | .cra_blocksize = 1, |
1845 | }, |
1846 | .setkey = gcm_setkey, |
1847 | .setauthsize = gcm_setauthsize, |
1848 | .encrypt = aead_encrypt, |
1849 | .decrypt = aead_decrypt, |
1850 | .ivsize = 12, |
1851 | .maxauthsize = AES_BLOCK_SIZE, |
1852 | }, |
1853 | .caam = { |
1854 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
1855 | .nodkp = true, |
1856 | } |
1857 | }, |
1858 | /* single-pass ipsec_esp descriptor */ |
1859 | { |
1860 | .aead = { |
1861 | .base = { |
1862 | .cra_name = "authenc(hmac(md5),cbc(aes))" , |
1863 | .cra_driver_name = "authenc-hmac-md5-" |
1864 | "cbc-aes-caam-qi2" , |
1865 | .cra_blocksize = AES_BLOCK_SIZE, |
1866 | }, |
1867 | .setkey = aead_setkey, |
1868 | .setauthsize = aead_setauthsize, |
1869 | .encrypt = aead_encrypt, |
1870 | .decrypt = aead_decrypt, |
1871 | .ivsize = AES_BLOCK_SIZE, |
1872 | .maxauthsize = MD5_DIGEST_SIZE, |
1873 | }, |
1874 | .caam = { |
1875 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1876 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
1877 | OP_ALG_AAI_HMAC_PRECOMP, |
1878 | } |
1879 | }, |
1880 | { |
1881 | .aead = { |
1882 | .base = { |
1883 | .cra_name = "echainiv(authenc(hmac(md5)," |
1884 | "cbc(aes)))" , |
1885 | .cra_driver_name = "echainiv-authenc-hmac-md5-" |
1886 | "cbc-aes-caam-qi2" , |
1887 | .cra_blocksize = AES_BLOCK_SIZE, |
1888 | }, |
1889 | .setkey = aead_setkey, |
1890 | .setauthsize = aead_setauthsize, |
1891 | .encrypt = aead_encrypt, |
1892 | .decrypt = aead_decrypt, |
1893 | .ivsize = AES_BLOCK_SIZE, |
1894 | .maxauthsize = MD5_DIGEST_SIZE, |
1895 | }, |
1896 | .caam = { |
1897 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1898 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
1899 | OP_ALG_AAI_HMAC_PRECOMP, |
1900 | .geniv = true, |
1901 | } |
1902 | }, |
1903 | { |
1904 | .aead = { |
1905 | .base = { |
1906 | .cra_name = "authenc(hmac(sha1),cbc(aes))" , |
1907 | .cra_driver_name = "authenc-hmac-sha1-" |
1908 | "cbc-aes-caam-qi2" , |
1909 | .cra_blocksize = AES_BLOCK_SIZE, |
1910 | }, |
1911 | .setkey = aead_setkey, |
1912 | .setauthsize = aead_setauthsize, |
1913 | .encrypt = aead_encrypt, |
1914 | .decrypt = aead_decrypt, |
1915 | .ivsize = AES_BLOCK_SIZE, |
1916 | .maxauthsize = SHA1_DIGEST_SIZE, |
1917 | }, |
1918 | .caam = { |
1919 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1920 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
1921 | OP_ALG_AAI_HMAC_PRECOMP, |
1922 | } |
1923 | }, |
1924 | { |
1925 | .aead = { |
1926 | .base = { |
1927 | .cra_name = "echainiv(authenc(hmac(sha1)," |
1928 | "cbc(aes)))" , |
1929 | .cra_driver_name = "echainiv-authenc-" |
1930 | "hmac-sha1-cbc-aes-caam-qi2" , |
1931 | .cra_blocksize = AES_BLOCK_SIZE, |
1932 | }, |
1933 | .setkey = aead_setkey, |
1934 | .setauthsize = aead_setauthsize, |
1935 | .encrypt = aead_encrypt, |
1936 | .decrypt = aead_decrypt, |
1937 | .ivsize = AES_BLOCK_SIZE, |
1938 | .maxauthsize = SHA1_DIGEST_SIZE, |
1939 | }, |
1940 | .caam = { |
1941 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1942 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
1943 | OP_ALG_AAI_HMAC_PRECOMP, |
1944 | .geniv = true, |
1945 | }, |
1946 | }, |
1947 | { |
1948 | .aead = { |
1949 | .base = { |
1950 | .cra_name = "authenc(hmac(sha224),cbc(aes))" , |
1951 | .cra_driver_name = "authenc-hmac-sha224-" |
1952 | "cbc-aes-caam-qi2" , |
1953 | .cra_blocksize = AES_BLOCK_SIZE, |
1954 | }, |
1955 | .setkey = aead_setkey, |
1956 | .setauthsize = aead_setauthsize, |
1957 | .encrypt = aead_encrypt, |
1958 | .decrypt = aead_decrypt, |
1959 | .ivsize = AES_BLOCK_SIZE, |
1960 | .maxauthsize = SHA224_DIGEST_SIZE, |
1961 | }, |
1962 | .caam = { |
1963 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1964 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
1965 | OP_ALG_AAI_HMAC_PRECOMP, |
1966 | } |
1967 | }, |
1968 | { |
1969 | .aead = { |
1970 | .base = { |
1971 | .cra_name = "echainiv(authenc(hmac(sha224)," |
1972 | "cbc(aes)))" , |
1973 | .cra_driver_name = "echainiv-authenc-" |
1974 | "hmac-sha224-cbc-aes-caam-qi2" , |
1975 | .cra_blocksize = AES_BLOCK_SIZE, |
1976 | }, |
1977 | .setkey = aead_setkey, |
1978 | .setauthsize = aead_setauthsize, |
1979 | .encrypt = aead_encrypt, |
1980 | .decrypt = aead_decrypt, |
1981 | .ivsize = AES_BLOCK_SIZE, |
1982 | .maxauthsize = SHA224_DIGEST_SIZE, |
1983 | }, |
1984 | .caam = { |
1985 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
1986 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
1987 | OP_ALG_AAI_HMAC_PRECOMP, |
1988 | .geniv = true, |
1989 | } |
1990 | }, |
1991 | { |
1992 | .aead = { |
1993 | .base = { |
1994 | .cra_name = "authenc(hmac(sha256),cbc(aes))" , |
1995 | .cra_driver_name = "authenc-hmac-sha256-" |
1996 | "cbc-aes-caam-qi2" , |
1997 | .cra_blocksize = AES_BLOCK_SIZE, |
1998 | }, |
1999 | .setkey = aead_setkey, |
2000 | .setauthsize = aead_setauthsize, |
2001 | .encrypt = aead_encrypt, |
2002 | .decrypt = aead_decrypt, |
2003 | .ivsize = AES_BLOCK_SIZE, |
2004 | .maxauthsize = SHA256_DIGEST_SIZE, |
2005 | }, |
2006 | .caam = { |
2007 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
2008 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2009 | OP_ALG_AAI_HMAC_PRECOMP, |
2010 | } |
2011 | }, |
2012 | { |
2013 | .aead = { |
2014 | .base = { |
2015 | .cra_name = "echainiv(authenc(hmac(sha256)," |
2016 | "cbc(aes)))" , |
2017 | .cra_driver_name = "echainiv-authenc-" |
2018 | "hmac-sha256-cbc-aes-" |
2019 | "caam-qi2" , |
2020 | .cra_blocksize = AES_BLOCK_SIZE, |
2021 | }, |
2022 | .setkey = aead_setkey, |
2023 | .setauthsize = aead_setauthsize, |
2024 | .encrypt = aead_encrypt, |
2025 | .decrypt = aead_decrypt, |
2026 | .ivsize = AES_BLOCK_SIZE, |
2027 | .maxauthsize = SHA256_DIGEST_SIZE, |
2028 | }, |
2029 | .caam = { |
2030 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
2031 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2032 | OP_ALG_AAI_HMAC_PRECOMP, |
2033 | .geniv = true, |
2034 | } |
2035 | }, |
2036 | { |
2037 | .aead = { |
2038 | .base = { |
2039 | .cra_name = "authenc(hmac(sha384),cbc(aes))" , |
2040 | .cra_driver_name = "authenc-hmac-sha384-" |
2041 | "cbc-aes-caam-qi2" , |
2042 | .cra_blocksize = AES_BLOCK_SIZE, |
2043 | }, |
2044 | .setkey = aead_setkey, |
2045 | .setauthsize = aead_setauthsize, |
2046 | .encrypt = aead_encrypt, |
2047 | .decrypt = aead_decrypt, |
2048 | .ivsize = AES_BLOCK_SIZE, |
2049 | .maxauthsize = SHA384_DIGEST_SIZE, |
2050 | }, |
2051 | .caam = { |
2052 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
2053 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2054 | OP_ALG_AAI_HMAC_PRECOMP, |
2055 | } |
2056 | }, |
2057 | { |
2058 | .aead = { |
2059 | .base = { |
2060 | .cra_name = "echainiv(authenc(hmac(sha384)," |
2061 | "cbc(aes)))" , |
2062 | .cra_driver_name = "echainiv-authenc-" |
2063 | "hmac-sha384-cbc-aes-" |
2064 | "caam-qi2" , |
2065 | .cra_blocksize = AES_BLOCK_SIZE, |
2066 | }, |
2067 | .setkey = aead_setkey, |
2068 | .setauthsize = aead_setauthsize, |
2069 | .encrypt = aead_encrypt, |
2070 | .decrypt = aead_decrypt, |
2071 | .ivsize = AES_BLOCK_SIZE, |
2072 | .maxauthsize = SHA384_DIGEST_SIZE, |
2073 | }, |
2074 | .caam = { |
2075 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
2076 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2077 | OP_ALG_AAI_HMAC_PRECOMP, |
2078 | .geniv = true, |
2079 | } |
2080 | }, |
2081 | { |
2082 | .aead = { |
2083 | .base = { |
2084 | .cra_name = "authenc(hmac(sha512),cbc(aes))" , |
2085 | .cra_driver_name = "authenc-hmac-sha512-" |
2086 | "cbc-aes-caam-qi2" , |
2087 | .cra_blocksize = AES_BLOCK_SIZE, |
2088 | }, |
2089 | .setkey = aead_setkey, |
2090 | .setauthsize = aead_setauthsize, |
2091 | .encrypt = aead_encrypt, |
2092 | .decrypt = aead_decrypt, |
2093 | .ivsize = AES_BLOCK_SIZE, |
2094 | .maxauthsize = SHA512_DIGEST_SIZE, |
2095 | }, |
2096 | .caam = { |
2097 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
2098 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2099 | OP_ALG_AAI_HMAC_PRECOMP, |
2100 | } |
2101 | }, |
2102 | { |
2103 | .aead = { |
2104 | .base = { |
2105 | .cra_name = "echainiv(authenc(hmac(sha512)," |
2106 | "cbc(aes)))" , |
2107 | .cra_driver_name = "echainiv-authenc-" |
2108 | "hmac-sha512-cbc-aes-" |
2109 | "caam-qi2" , |
2110 | .cra_blocksize = AES_BLOCK_SIZE, |
2111 | }, |
2112 | .setkey = aead_setkey, |
2113 | .setauthsize = aead_setauthsize, |
2114 | .encrypt = aead_encrypt, |
2115 | .decrypt = aead_decrypt, |
2116 | .ivsize = AES_BLOCK_SIZE, |
2117 | .maxauthsize = SHA512_DIGEST_SIZE, |
2118 | }, |
2119 | .caam = { |
2120 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
2121 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2122 | OP_ALG_AAI_HMAC_PRECOMP, |
2123 | .geniv = true, |
2124 | } |
2125 | }, |
2126 | { |
2127 | .aead = { |
2128 | .base = { |
2129 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))" , |
2130 | .cra_driver_name = "authenc-hmac-md5-" |
2131 | "cbc-des3_ede-caam-qi2" , |
2132 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2133 | }, |
2134 | .setkey = des3_aead_setkey, |
2135 | .setauthsize = aead_setauthsize, |
2136 | .encrypt = aead_encrypt, |
2137 | .decrypt = aead_decrypt, |
2138 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2139 | .maxauthsize = MD5_DIGEST_SIZE, |
2140 | }, |
2141 | .caam = { |
2142 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2143 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2144 | OP_ALG_AAI_HMAC_PRECOMP, |
2145 | } |
2146 | }, |
2147 | { |
2148 | .aead = { |
2149 | .base = { |
2150 | .cra_name = "echainiv(authenc(hmac(md5)," |
2151 | "cbc(des3_ede)))" , |
2152 | .cra_driver_name = "echainiv-authenc-hmac-md5-" |
2153 | "cbc-des3_ede-caam-qi2" , |
2154 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2155 | }, |
2156 | .setkey = des3_aead_setkey, |
2157 | .setauthsize = aead_setauthsize, |
2158 | .encrypt = aead_encrypt, |
2159 | .decrypt = aead_decrypt, |
2160 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2161 | .maxauthsize = MD5_DIGEST_SIZE, |
2162 | }, |
2163 | .caam = { |
2164 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2165 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2166 | OP_ALG_AAI_HMAC_PRECOMP, |
2167 | .geniv = true, |
2168 | } |
2169 | }, |
2170 | { |
2171 | .aead = { |
2172 | .base = { |
2173 | .cra_name = "authenc(hmac(sha1)," |
2174 | "cbc(des3_ede))" , |
2175 | .cra_driver_name = "authenc-hmac-sha1-" |
2176 | "cbc-des3_ede-caam-qi2" , |
2177 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2178 | }, |
2179 | .setkey = des3_aead_setkey, |
2180 | .setauthsize = aead_setauthsize, |
2181 | .encrypt = aead_encrypt, |
2182 | .decrypt = aead_decrypt, |
2183 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2184 | .maxauthsize = SHA1_DIGEST_SIZE, |
2185 | }, |
2186 | .caam = { |
2187 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2188 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2189 | OP_ALG_AAI_HMAC_PRECOMP, |
2190 | }, |
2191 | }, |
2192 | { |
2193 | .aead = { |
2194 | .base = { |
2195 | .cra_name = "echainiv(authenc(hmac(sha1)," |
2196 | "cbc(des3_ede)))" , |
2197 | .cra_driver_name = "echainiv-authenc-" |
2198 | "hmac-sha1-" |
2199 | "cbc-des3_ede-caam-qi2" , |
2200 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2201 | }, |
2202 | .setkey = des3_aead_setkey, |
2203 | .setauthsize = aead_setauthsize, |
2204 | .encrypt = aead_encrypt, |
2205 | .decrypt = aead_decrypt, |
2206 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2207 | .maxauthsize = SHA1_DIGEST_SIZE, |
2208 | }, |
2209 | .caam = { |
2210 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2211 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2212 | OP_ALG_AAI_HMAC_PRECOMP, |
2213 | .geniv = true, |
2214 | } |
2215 | }, |
2216 | { |
2217 | .aead = { |
2218 | .base = { |
2219 | .cra_name = "authenc(hmac(sha224)," |
2220 | "cbc(des3_ede))" , |
2221 | .cra_driver_name = "authenc-hmac-sha224-" |
2222 | "cbc-des3_ede-caam-qi2" , |
2223 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2224 | }, |
2225 | .setkey = des3_aead_setkey, |
2226 | .setauthsize = aead_setauthsize, |
2227 | .encrypt = aead_encrypt, |
2228 | .decrypt = aead_decrypt, |
2229 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2230 | .maxauthsize = SHA224_DIGEST_SIZE, |
2231 | }, |
2232 | .caam = { |
2233 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2234 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2235 | OP_ALG_AAI_HMAC_PRECOMP, |
2236 | }, |
2237 | }, |
2238 | { |
2239 | .aead = { |
2240 | .base = { |
2241 | .cra_name = "echainiv(authenc(hmac(sha224)," |
2242 | "cbc(des3_ede)))" , |
2243 | .cra_driver_name = "echainiv-authenc-" |
2244 | "hmac-sha224-" |
2245 | "cbc-des3_ede-caam-qi2" , |
2246 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2247 | }, |
2248 | .setkey = des3_aead_setkey, |
2249 | .setauthsize = aead_setauthsize, |
2250 | .encrypt = aead_encrypt, |
2251 | .decrypt = aead_decrypt, |
2252 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2253 | .maxauthsize = SHA224_DIGEST_SIZE, |
2254 | }, |
2255 | .caam = { |
2256 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2257 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2258 | OP_ALG_AAI_HMAC_PRECOMP, |
2259 | .geniv = true, |
2260 | } |
2261 | }, |
2262 | { |
2263 | .aead = { |
2264 | .base = { |
2265 | .cra_name = "authenc(hmac(sha256)," |
2266 | "cbc(des3_ede))" , |
2267 | .cra_driver_name = "authenc-hmac-sha256-" |
2268 | "cbc-des3_ede-caam-qi2" , |
2269 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2270 | }, |
2271 | .setkey = des3_aead_setkey, |
2272 | .setauthsize = aead_setauthsize, |
2273 | .encrypt = aead_encrypt, |
2274 | .decrypt = aead_decrypt, |
2275 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2276 | .maxauthsize = SHA256_DIGEST_SIZE, |
2277 | }, |
2278 | .caam = { |
2279 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2280 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2281 | OP_ALG_AAI_HMAC_PRECOMP, |
2282 | }, |
2283 | }, |
2284 | { |
2285 | .aead = { |
2286 | .base = { |
2287 | .cra_name = "echainiv(authenc(hmac(sha256)," |
2288 | "cbc(des3_ede)))" , |
2289 | .cra_driver_name = "echainiv-authenc-" |
2290 | "hmac-sha256-" |
2291 | "cbc-des3_ede-caam-qi2" , |
2292 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2293 | }, |
2294 | .setkey = des3_aead_setkey, |
2295 | .setauthsize = aead_setauthsize, |
2296 | .encrypt = aead_encrypt, |
2297 | .decrypt = aead_decrypt, |
2298 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2299 | .maxauthsize = SHA256_DIGEST_SIZE, |
2300 | }, |
2301 | .caam = { |
2302 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2303 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2304 | OP_ALG_AAI_HMAC_PRECOMP, |
2305 | .geniv = true, |
2306 | } |
2307 | }, |
2308 | { |
2309 | .aead = { |
2310 | .base = { |
2311 | .cra_name = "authenc(hmac(sha384)," |
2312 | "cbc(des3_ede))" , |
2313 | .cra_driver_name = "authenc-hmac-sha384-" |
2314 | "cbc-des3_ede-caam-qi2" , |
2315 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2316 | }, |
2317 | .setkey = des3_aead_setkey, |
2318 | .setauthsize = aead_setauthsize, |
2319 | .encrypt = aead_encrypt, |
2320 | .decrypt = aead_decrypt, |
2321 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2322 | .maxauthsize = SHA384_DIGEST_SIZE, |
2323 | }, |
2324 | .caam = { |
2325 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2326 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2327 | OP_ALG_AAI_HMAC_PRECOMP, |
2328 | }, |
2329 | }, |
2330 | { |
2331 | .aead = { |
2332 | .base = { |
2333 | .cra_name = "echainiv(authenc(hmac(sha384)," |
2334 | "cbc(des3_ede)))" , |
2335 | .cra_driver_name = "echainiv-authenc-" |
2336 | "hmac-sha384-" |
2337 | "cbc-des3_ede-caam-qi2" , |
2338 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2339 | }, |
2340 | .setkey = des3_aead_setkey, |
2341 | .setauthsize = aead_setauthsize, |
2342 | .encrypt = aead_encrypt, |
2343 | .decrypt = aead_decrypt, |
2344 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2345 | .maxauthsize = SHA384_DIGEST_SIZE, |
2346 | }, |
2347 | .caam = { |
2348 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2349 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2350 | OP_ALG_AAI_HMAC_PRECOMP, |
2351 | .geniv = true, |
2352 | } |
2353 | }, |
2354 | { |
2355 | .aead = { |
2356 | .base = { |
2357 | .cra_name = "authenc(hmac(sha512)," |
2358 | "cbc(des3_ede))" , |
2359 | .cra_driver_name = "authenc-hmac-sha512-" |
2360 | "cbc-des3_ede-caam-qi2" , |
2361 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2362 | }, |
2363 | .setkey = des3_aead_setkey, |
2364 | .setauthsize = aead_setauthsize, |
2365 | .encrypt = aead_encrypt, |
2366 | .decrypt = aead_decrypt, |
2367 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2368 | .maxauthsize = SHA512_DIGEST_SIZE, |
2369 | }, |
2370 | .caam = { |
2371 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2372 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2373 | OP_ALG_AAI_HMAC_PRECOMP, |
2374 | }, |
2375 | }, |
2376 | { |
2377 | .aead = { |
2378 | .base = { |
2379 | .cra_name = "echainiv(authenc(hmac(sha512)," |
2380 | "cbc(des3_ede)))" , |
2381 | .cra_driver_name = "echainiv-authenc-" |
2382 | "hmac-sha512-" |
2383 | "cbc-des3_ede-caam-qi2" , |
2384 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2385 | }, |
2386 | .setkey = des3_aead_setkey, |
2387 | .setauthsize = aead_setauthsize, |
2388 | .encrypt = aead_encrypt, |
2389 | .decrypt = aead_decrypt, |
2390 | .ivsize = DES3_EDE_BLOCK_SIZE, |
2391 | .maxauthsize = SHA512_DIGEST_SIZE, |
2392 | }, |
2393 | .caam = { |
2394 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
2395 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2396 | OP_ALG_AAI_HMAC_PRECOMP, |
2397 | .geniv = true, |
2398 | } |
2399 | }, |
2400 | { |
2401 | .aead = { |
2402 | .base = { |
2403 | .cra_name = "authenc(hmac(md5),cbc(des))" , |
2404 | .cra_driver_name = "authenc-hmac-md5-" |
2405 | "cbc-des-caam-qi2" , |
2406 | .cra_blocksize = DES_BLOCK_SIZE, |
2407 | }, |
2408 | .setkey = aead_setkey, |
2409 | .setauthsize = aead_setauthsize, |
2410 | .encrypt = aead_encrypt, |
2411 | .decrypt = aead_decrypt, |
2412 | .ivsize = DES_BLOCK_SIZE, |
2413 | .maxauthsize = MD5_DIGEST_SIZE, |
2414 | }, |
2415 | .caam = { |
2416 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2417 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2418 | OP_ALG_AAI_HMAC_PRECOMP, |
2419 | }, |
2420 | }, |
2421 | { |
2422 | .aead = { |
2423 | .base = { |
2424 | .cra_name = "echainiv(authenc(hmac(md5)," |
2425 | "cbc(des)))" , |
2426 | .cra_driver_name = "echainiv-authenc-hmac-md5-" |
2427 | "cbc-des-caam-qi2" , |
2428 | .cra_blocksize = DES_BLOCK_SIZE, |
2429 | }, |
2430 | .setkey = aead_setkey, |
2431 | .setauthsize = aead_setauthsize, |
2432 | .encrypt = aead_encrypt, |
2433 | .decrypt = aead_decrypt, |
2434 | .ivsize = DES_BLOCK_SIZE, |
2435 | .maxauthsize = MD5_DIGEST_SIZE, |
2436 | }, |
2437 | .caam = { |
2438 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2439 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2440 | OP_ALG_AAI_HMAC_PRECOMP, |
2441 | .geniv = true, |
2442 | } |
2443 | }, |
2444 | { |
2445 | .aead = { |
2446 | .base = { |
2447 | .cra_name = "authenc(hmac(sha1),cbc(des))" , |
2448 | .cra_driver_name = "authenc-hmac-sha1-" |
2449 | "cbc-des-caam-qi2" , |
2450 | .cra_blocksize = DES_BLOCK_SIZE, |
2451 | }, |
2452 | .setkey = aead_setkey, |
2453 | .setauthsize = aead_setauthsize, |
2454 | .encrypt = aead_encrypt, |
2455 | .decrypt = aead_decrypt, |
2456 | .ivsize = DES_BLOCK_SIZE, |
2457 | .maxauthsize = SHA1_DIGEST_SIZE, |
2458 | }, |
2459 | .caam = { |
2460 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2461 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2462 | OP_ALG_AAI_HMAC_PRECOMP, |
2463 | }, |
2464 | }, |
2465 | { |
2466 | .aead = { |
2467 | .base = { |
2468 | .cra_name = "echainiv(authenc(hmac(sha1)," |
2469 | "cbc(des)))" , |
2470 | .cra_driver_name = "echainiv-authenc-" |
2471 | "hmac-sha1-cbc-des-caam-qi2" , |
2472 | .cra_blocksize = DES_BLOCK_SIZE, |
2473 | }, |
2474 | .setkey = aead_setkey, |
2475 | .setauthsize = aead_setauthsize, |
2476 | .encrypt = aead_encrypt, |
2477 | .decrypt = aead_decrypt, |
2478 | .ivsize = DES_BLOCK_SIZE, |
2479 | .maxauthsize = SHA1_DIGEST_SIZE, |
2480 | }, |
2481 | .caam = { |
2482 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2483 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2484 | OP_ALG_AAI_HMAC_PRECOMP, |
2485 | .geniv = true, |
2486 | } |
2487 | }, |
2488 | { |
2489 | .aead = { |
2490 | .base = { |
2491 | .cra_name = "authenc(hmac(sha224),cbc(des))" , |
2492 | .cra_driver_name = "authenc-hmac-sha224-" |
2493 | "cbc-des-caam-qi2" , |
2494 | .cra_blocksize = DES_BLOCK_SIZE, |
2495 | }, |
2496 | .setkey = aead_setkey, |
2497 | .setauthsize = aead_setauthsize, |
2498 | .encrypt = aead_encrypt, |
2499 | .decrypt = aead_decrypt, |
2500 | .ivsize = DES_BLOCK_SIZE, |
2501 | .maxauthsize = SHA224_DIGEST_SIZE, |
2502 | }, |
2503 | .caam = { |
2504 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2505 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2506 | OP_ALG_AAI_HMAC_PRECOMP, |
2507 | }, |
2508 | }, |
2509 | { |
2510 | .aead = { |
2511 | .base = { |
2512 | .cra_name = "echainiv(authenc(hmac(sha224)," |
2513 | "cbc(des)))" , |
2514 | .cra_driver_name = "echainiv-authenc-" |
2515 | "hmac-sha224-cbc-des-" |
2516 | "caam-qi2" , |
2517 | .cra_blocksize = DES_BLOCK_SIZE, |
2518 | }, |
2519 | .setkey = aead_setkey, |
2520 | .setauthsize = aead_setauthsize, |
2521 | .encrypt = aead_encrypt, |
2522 | .decrypt = aead_decrypt, |
2523 | .ivsize = DES_BLOCK_SIZE, |
2524 | .maxauthsize = SHA224_DIGEST_SIZE, |
2525 | }, |
2526 | .caam = { |
2527 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2528 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2529 | OP_ALG_AAI_HMAC_PRECOMP, |
2530 | .geniv = true, |
2531 | } |
2532 | }, |
2533 | { |
2534 | .aead = { |
2535 | .base = { |
2536 | .cra_name = "authenc(hmac(sha256),cbc(des))" , |
2537 | .cra_driver_name = "authenc-hmac-sha256-" |
2538 | "cbc-des-caam-qi2" , |
2539 | .cra_blocksize = DES_BLOCK_SIZE, |
2540 | }, |
2541 | .setkey = aead_setkey, |
2542 | .setauthsize = aead_setauthsize, |
2543 | .encrypt = aead_encrypt, |
2544 | .decrypt = aead_decrypt, |
2545 | .ivsize = DES_BLOCK_SIZE, |
2546 | .maxauthsize = SHA256_DIGEST_SIZE, |
2547 | }, |
2548 | .caam = { |
2549 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2550 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2551 | OP_ALG_AAI_HMAC_PRECOMP, |
2552 | }, |
2553 | }, |
2554 | { |
2555 | .aead = { |
2556 | .base = { |
2557 | .cra_name = "echainiv(authenc(hmac(sha256)," |
2558 | "cbc(des)))" , |
2559 | .cra_driver_name = "echainiv-authenc-" |
2560 | "hmac-sha256-cbc-des-" |
2561 | "caam-qi2" , |
2562 | .cra_blocksize = DES_BLOCK_SIZE, |
2563 | }, |
2564 | .setkey = aead_setkey, |
2565 | .setauthsize = aead_setauthsize, |
2566 | .encrypt = aead_encrypt, |
2567 | .decrypt = aead_decrypt, |
2568 | .ivsize = DES_BLOCK_SIZE, |
2569 | .maxauthsize = SHA256_DIGEST_SIZE, |
2570 | }, |
2571 | .caam = { |
2572 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2573 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2574 | OP_ALG_AAI_HMAC_PRECOMP, |
2575 | .geniv = true, |
2576 | }, |
2577 | }, |
2578 | { |
2579 | .aead = { |
2580 | .base = { |
2581 | .cra_name = "authenc(hmac(sha384),cbc(des))" , |
2582 | .cra_driver_name = "authenc-hmac-sha384-" |
2583 | "cbc-des-caam-qi2" , |
2584 | .cra_blocksize = DES_BLOCK_SIZE, |
2585 | }, |
2586 | .setkey = aead_setkey, |
2587 | .setauthsize = aead_setauthsize, |
2588 | .encrypt = aead_encrypt, |
2589 | .decrypt = aead_decrypt, |
2590 | .ivsize = DES_BLOCK_SIZE, |
2591 | .maxauthsize = SHA384_DIGEST_SIZE, |
2592 | }, |
2593 | .caam = { |
2594 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2595 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2596 | OP_ALG_AAI_HMAC_PRECOMP, |
2597 | }, |
2598 | }, |
2599 | { |
2600 | .aead = { |
2601 | .base = { |
2602 | .cra_name = "echainiv(authenc(hmac(sha384)," |
2603 | "cbc(des)))" , |
2604 | .cra_driver_name = "echainiv-authenc-" |
2605 | "hmac-sha384-cbc-des-" |
2606 | "caam-qi2" , |
2607 | .cra_blocksize = DES_BLOCK_SIZE, |
2608 | }, |
2609 | .setkey = aead_setkey, |
2610 | .setauthsize = aead_setauthsize, |
2611 | .encrypt = aead_encrypt, |
2612 | .decrypt = aead_decrypt, |
2613 | .ivsize = DES_BLOCK_SIZE, |
2614 | .maxauthsize = SHA384_DIGEST_SIZE, |
2615 | }, |
2616 | .caam = { |
2617 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2618 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2619 | OP_ALG_AAI_HMAC_PRECOMP, |
2620 | .geniv = true, |
2621 | } |
2622 | }, |
2623 | { |
2624 | .aead = { |
2625 | .base = { |
2626 | .cra_name = "authenc(hmac(sha512),cbc(des))" , |
2627 | .cra_driver_name = "authenc-hmac-sha512-" |
2628 | "cbc-des-caam-qi2" , |
2629 | .cra_blocksize = DES_BLOCK_SIZE, |
2630 | }, |
2631 | .setkey = aead_setkey, |
2632 | .setauthsize = aead_setauthsize, |
2633 | .encrypt = aead_encrypt, |
2634 | .decrypt = aead_decrypt, |
2635 | .ivsize = DES_BLOCK_SIZE, |
2636 | .maxauthsize = SHA512_DIGEST_SIZE, |
2637 | }, |
2638 | .caam = { |
2639 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2640 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2641 | OP_ALG_AAI_HMAC_PRECOMP, |
2642 | } |
2643 | }, |
2644 | { |
2645 | .aead = { |
2646 | .base = { |
2647 | .cra_name = "echainiv(authenc(hmac(sha512)," |
2648 | "cbc(des)))" , |
2649 | .cra_driver_name = "echainiv-authenc-" |
2650 | "hmac-sha512-cbc-des-" |
2651 | "caam-qi2" , |
2652 | .cra_blocksize = DES_BLOCK_SIZE, |
2653 | }, |
2654 | .setkey = aead_setkey, |
2655 | .setauthsize = aead_setauthsize, |
2656 | .encrypt = aead_encrypt, |
2657 | .decrypt = aead_decrypt, |
2658 | .ivsize = DES_BLOCK_SIZE, |
2659 | .maxauthsize = SHA512_DIGEST_SIZE, |
2660 | }, |
2661 | .caam = { |
2662 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
2663 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2664 | OP_ALG_AAI_HMAC_PRECOMP, |
2665 | .geniv = true, |
2666 | } |
2667 | }, |
2668 | { |
2669 | .aead = { |
2670 | .base = { |
2671 | .cra_name = "authenc(hmac(md5)," |
2672 | "rfc3686(ctr(aes)))" , |
2673 | .cra_driver_name = "authenc-hmac-md5-" |
2674 | "rfc3686-ctr-aes-caam-qi2" , |
2675 | .cra_blocksize = 1, |
2676 | }, |
2677 | .setkey = aead_setkey, |
2678 | .setauthsize = aead_setauthsize, |
2679 | .encrypt = aead_encrypt, |
2680 | .decrypt = aead_decrypt, |
2681 | .ivsize = CTR_RFC3686_IV_SIZE, |
2682 | .maxauthsize = MD5_DIGEST_SIZE, |
2683 | }, |
2684 | .caam = { |
2685 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2686 | OP_ALG_AAI_CTR_MOD128, |
2687 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2688 | OP_ALG_AAI_HMAC_PRECOMP, |
2689 | .rfc3686 = true, |
2690 | }, |
2691 | }, |
2692 | { |
2693 | .aead = { |
2694 | .base = { |
2695 | .cra_name = "seqiv(authenc(" |
2696 | "hmac(md5),rfc3686(ctr(aes))))" , |
2697 | .cra_driver_name = "seqiv-authenc-hmac-md5-" |
2698 | "rfc3686-ctr-aes-caam-qi2" , |
2699 | .cra_blocksize = 1, |
2700 | }, |
2701 | .setkey = aead_setkey, |
2702 | .setauthsize = aead_setauthsize, |
2703 | .encrypt = aead_encrypt, |
2704 | .decrypt = aead_decrypt, |
2705 | .ivsize = CTR_RFC3686_IV_SIZE, |
2706 | .maxauthsize = MD5_DIGEST_SIZE, |
2707 | }, |
2708 | .caam = { |
2709 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2710 | OP_ALG_AAI_CTR_MOD128, |
2711 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
2712 | OP_ALG_AAI_HMAC_PRECOMP, |
2713 | .rfc3686 = true, |
2714 | .geniv = true, |
2715 | }, |
2716 | }, |
2717 | { |
2718 | .aead = { |
2719 | .base = { |
2720 | .cra_name = "authenc(hmac(sha1)," |
2721 | "rfc3686(ctr(aes)))" , |
2722 | .cra_driver_name = "authenc-hmac-sha1-" |
2723 | "rfc3686-ctr-aes-caam-qi2" , |
2724 | .cra_blocksize = 1, |
2725 | }, |
2726 | .setkey = aead_setkey, |
2727 | .setauthsize = aead_setauthsize, |
2728 | .encrypt = aead_encrypt, |
2729 | .decrypt = aead_decrypt, |
2730 | .ivsize = CTR_RFC3686_IV_SIZE, |
2731 | .maxauthsize = SHA1_DIGEST_SIZE, |
2732 | }, |
2733 | .caam = { |
2734 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2735 | OP_ALG_AAI_CTR_MOD128, |
2736 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2737 | OP_ALG_AAI_HMAC_PRECOMP, |
2738 | .rfc3686 = true, |
2739 | }, |
2740 | }, |
2741 | { |
2742 | .aead = { |
2743 | .base = { |
2744 | .cra_name = "seqiv(authenc(" |
2745 | "hmac(sha1),rfc3686(ctr(aes))))" , |
2746 | .cra_driver_name = "seqiv-authenc-hmac-sha1-" |
2747 | "rfc3686-ctr-aes-caam-qi2" , |
2748 | .cra_blocksize = 1, |
2749 | }, |
2750 | .setkey = aead_setkey, |
2751 | .setauthsize = aead_setauthsize, |
2752 | .encrypt = aead_encrypt, |
2753 | .decrypt = aead_decrypt, |
2754 | .ivsize = CTR_RFC3686_IV_SIZE, |
2755 | .maxauthsize = SHA1_DIGEST_SIZE, |
2756 | }, |
2757 | .caam = { |
2758 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2759 | OP_ALG_AAI_CTR_MOD128, |
2760 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
2761 | OP_ALG_AAI_HMAC_PRECOMP, |
2762 | .rfc3686 = true, |
2763 | .geniv = true, |
2764 | }, |
2765 | }, |
2766 | { |
2767 | .aead = { |
2768 | .base = { |
2769 | .cra_name = "authenc(hmac(sha224)," |
2770 | "rfc3686(ctr(aes)))" , |
2771 | .cra_driver_name = "authenc-hmac-sha224-" |
2772 | "rfc3686-ctr-aes-caam-qi2" , |
2773 | .cra_blocksize = 1, |
2774 | }, |
2775 | .setkey = aead_setkey, |
2776 | .setauthsize = aead_setauthsize, |
2777 | .encrypt = aead_encrypt, |
2778 | .decrypt = aead_decrypt, |
2779 | .ivsize = CTR_RFC3686_IV_SIZE, |
2780 | .maxauthsize = SHA224_DIGEST_SIZE, |
2781 | }, |
2782 | .caam = { |
2783 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2784 | OP_ALG_AAI_CTR_MOD128, |
2785 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2786 | OP_ALG_AAI_HMAC_PRECOMP, |
2787 | .rfc3686 = true, |
2788 | }, |
2789 | }, |
2790 | { |
2791 | .aead = { |
2792 | .base = { |
2793 | .cra_name = "seqiv(authenc(" |
2794 | "hmac(sha224),rfc3686(ctr(aes))))" , |
2795 | .cra_driver_name = "seqiv-authenc-hmac-sha224-" |
2796 | "rfc3686-ctr-aes-caam-qi2" , |
2797 | .cra_blocksize = 1, |
2798 | }, |
2799 | .setkey = aead_setkey, |
2800 | .setauthsize = aead_setauthsize, |
2801 | .encrypt = aead_encrypt, |
2802 | .decrypt = aead_decrypt, |
2803 | .ivsize = CTR_RFC3686_IV_SIZE, |
2804 | .maxauthsize = SHA224_DIGEST_SIZE, |
2805 | }, |
2806 | .caam = { |
2807 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2808 | OP_ALG_AAI_CTR_MOD128, |
2809 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
2810 | OP_ALG_AAI_HMAC_PRECOMP, |
2811 | .rfc3686 = true, |
2812 | .geniv = true, |
2813 | }, |
2814 | }, |
2815 | { |
2816 | .aead = { |
2817 | .base = { |
2818 | .cra_name = "authenc(hmac(sha256)," |
2819 | "rfc3686(ctr(aes)))" , |
2820 | .cra_driver_name = "authenc-hmac-sha256-" |
2821 | "rfc3686-ctr-aes-caam-qi2" , |
2822 | .cra_blocksize = 1, |
2823 | }, |
2824 | .setkey = aead_setkey, |
2825 | .setauthsize = aead_setauthsize, |
2826 | .encrypt = aead_encrypt, |
2827 | .decrypt = aead_decrypt, |
2828 | .ivsize = CTR_RFC3686_IV_SIZE, |
2829 | .maxauthsize = SHA256_DIGEST_SIZE, |
2830 | }, |
2831 | .caam = { |
2832 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2833 | OP_ALG_AAI_CTR_MOD128, |
2834 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2835 | OP_ALG_AAI_HMAC_PRECOMP, |
2836 | .rfc3686 = true, |
2837 | }, |
2838 | }, |
2839 | { |
2840 | .aead = { |
2841 | .base = { |
2842 | .cra_name = "seqiv(authenc(hmac(sha256)," |
2843 | "rfc3686(ctr(aes))))" , |
2844 | .cra_driver_name = "seqiv-authenc-hmac-sha256-" |
2845 | "rfc3686-ctr-aes-caam-qi2" , |
2846 | .cra_blocksize = 1, |
2847 | }, |
2848 | .setkey = aead_setkey, |
2849 | .setauthsize = aead_setauthsize, |
2850 | .encrypt = aead_encrypt, |
2851 | .decrypt = aead_decrypt, |
2852 | .ivsize = CTR_RFC3686_IV_SIZE, |
2853 | .maxauthsize = SHA256_DIGEST_SIZE, |
2854 | }, |
2855 | .caam = { |
2856 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2857 | OP_ALG_AAI_CTR_MOD128, |
2858 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
2859 | OP_ALG_AAI_HMAC_PRECOMP, |
2860 | .rfc3686 = true, |
2861 | .geniv = true, |
2862 | }, |
2863 | }, |
2864 | { |
2865 | .aead = { |
2866 | .base = { |
2867 | .cra_name = "authenc(hmac(sha384)," |
2868 | "rfc3686(ctr(aes)))" , |
2869 | .cra_driver_name = "authenc-hmac-sha384-" |
2870 | "rfc3686-ctr-aes-caam-qi2" , |
2871 | .cra_blocksize = 1, |
2872 | }, |
2873 | .setkey = aead_setkey, |
2874 | .setauthsize = aead_setauthsize, |
2875 | .encrypt = aead_encrypt, |
2876 | .decrypt = aead_decrypt, |
2877 | .ivsize = CTR_RFC3686_IV_SIZE, |
2878 | .maxauthsize = SHA384_DIGEST_SIZE, |
2879 | }, |
2880 | .caam = { |
2881 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2882 | OP_ALG_AAI_CTR_MOD128, |
2883 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2884 | OP_ALG_AAI_HMAC_PRECOMP, |
2885 | .rfc3686 = true, |
2886 | }, |
2887 | }, |
2888 | { |
2889 | .aead = { |
2890 | .base = { |
2891 | .cra_name = "seqiv(authenc(hmac(sha384)," |
2892 | "rfc3686(ctr(aes))))" , |
2893 | .cra_driver_name = "seqiv-authenc-hmac-sha384-" |
2894 | "rfc3686-ctr-aes-caam-qi2" , |
2895 | .cra_blocksize = 1, |
2896 | }, |
2897 | .setkey = aead_setkey, |
2898 | .setauthsize = aead_setauthsize, |
2899 | .encrypt = aead_encrypt, |
2900 | .decrypt = aead_decrypt, |
2901 | .ivsize = CTR_RFC3686_IV_SIZE, |
2902 | .maxauthsize = SHA384_DIGEST_SIZE, |
2903 | }, |
2904 | .caam = { |
2905 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2906 | OP_ALG_AAI_CTR_MOD128, |
2907 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
2908 | OP_ALG_AAI_HMAC_PRECOMP, |
2909 | .rfc3686 = true, |
2910 | .geniv = true, |
2911 | }, |
2912 | }, |
2913 | { |
2914 | .aead = { |
2915 | .base = { |
2916 | .cra_name = "rfc7539(chacha20,poly1305)" , |
2917 | .cra_driver_name = "rfc7539-chacha20-poly1305-" |
2918 | "caam-qi2" , |
2919 | .cra_blocksize = 1, |
2920 | }, |
2921 | .setkey = chachapoly_setkey, |
2922 | .setauthsize = chachapoly_setauthsize, |
2923 | .encrypt = aead_encrypt, |
2924 | .decrypt = aead_decrypt, |
2925 | .ivsize = CHACHAPOLY_IV_SIZE, |
2926 | .maxauthsize = POLY1305_DIGEST_SIZE, |
2927 | }, |
2928 | .caam = { |
2929 | .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | |
2930 | OP_ALG_AAI_AEAD, |
2931 | .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | |
2932 | OP_ALG_AAI_AEAD, |
2933 | .nodkp = true, |
2934 | }, |
2935 | }, |
2936 | { |
2937 | .aead = { |
2938 | .base = { |
2939 | .cra_name = "rfc7539esp(chacha20,poly1305)" , |
2940 | .cra_driver_name = "rfc7539esp-chacha20-" |
2941 | "poly1305-caam-qi2" , |
2942 | .cra_blocksize = 1, |
2943 | }, |
2944 | .setkey = chachapoly_setkey, |
2945 | .setauthsize = chachapoly_setauthsize, |
2946 | .encrypt = aead_encrypt, |
2947 | .decrypt = aead_decrypt, |
2948 | .ivsize = 8, |
2949 | .maxauthsize = POLY1305_DIGEST_SIZE, |
2950 | }, |
2951 | .caam = { |
2952 | .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | |
2953 | OP_ALG_AAI_AEAD, |
2954 | .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | |
2955 | OP_ALG_AAI_AEAD, |
2956 | .nodkp = true, |
2957 | }, |
2958 | }, |
2959 | { |
2960 | .aead = { |
2961 | .base = { |
2962 | .cra_name = "authenc(hmac(sha512)," |
2963 | "rfc3686(ctr(aes)))" , |
2964 | .cra_driver_name = "authenc-hmac-sha512-" |
2965 | "rfc3686-ctr-aes-caam-qi2" , |
2966 | .cra_blocksize = 1, |
2967 | }, |
2968 | .setkey = aead_setkey, |
2969 | .setauthsize = aead_setauthsize, |
2970 | .encrypt = aead_encrypt, |
2971 | .decrypt = aead_decrypt, |
2972 | .ivsize = CTR_RFC3686_IV_SIZE, |
2973 | .maxauthsize = SHA512_DIGEST_SIZE, |
2974 | }, |
2975 | .caam = { |
2976 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
2977 | OP_ALG_AAI_CTR_MOD128, |
2978 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
2979 | OP_ALG_AAI_HMAC_PRECOMP, |
2980 | .rfc3686 = true, |
2981 | }, |
2982 | }, |
2983 | { |
2984 | .aead = { |
2985 | .base = { |
2986 | .cra_name = "seqiv(authenc(hmac(sha512)," |
2987 | "rfc3686(ctr(aes))))" , |
2988 | .cra_driver_name = "seqiv-authenc-hmac-sha512-" |
2989 | "rfc3686-ctr-aes-caam-qi2" , |
2990 | .cra_blocksize = 1, |
2991 | }, |
2992 | .setkey = aead_setkey, |
2993 | .setauthsize = aead_setauthsize, |
2994 | .encrypt = aead_encrypt, |
2995 | .decrypt = aead_decrypt, |
2996 | .ivsize = CTR_RFC3686_IV_SIZE, |
2997 | .maxauthsize = SHA512_DIGEST_SIZE, |
2998 | }, |
2999 | .caam = { |
3000 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
3001 | OP_ALG_AAI_CTR_MOD128, |
3002 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
3003 | OP_ALG_AAI_HMAC_PRECOMP, |
3004 | .rfc3686 = true, |
3005 | .geniv = true, |
3006 | }, |
3007 | }, |
3008 | }; |
3009 | |
3010 | static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) |
3011 | { |
3012 | struct skcipher_alg *alg = &t_alg->skcipher; |
3013 | |
3014 | alg->base.cra_module = THIS_MODULE; |
3015 | alg->base.cra_priority = CAAM_CRA_PRIORITY; |
3016 | alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); |
3017 | alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | |
3018 | CRYPTO_ALG_KERN_DRIVER_ONLY); |
3019 | |
3020 | alg->init = caam_cra_init_skcipher; |
3021 | alg->exit = caam_cra_exit; |
3022 | } |
3023 | |
3024 | static void caam_aead_alg_init(struct caam_aead_alg *t_alg) |
3025 | { |
3026 | struct aead_alg *alg = &t_alg->aead; |
3027 | |
3028 | alg->base.cra_module = THIS_MODULE; |
3029 | alg->base.cra_priority = CAAM_CRA_PRIORITY; |
3030 | alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); |
3031 | alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | |
3032 | CRYPTO_ALG_KERN_DRIVER_ONLY; |
3033 | |
3034 | alg->init = caam_cra_init_aead; |
3035 | alg->exit = caam_cra_exit_aead; |
3036 | } |
3037 | |
3038 | /* max hash key is max split key size */ |
3039 | #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) |
3040 | |
3041 | #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE |
3042 | |
/* caam context sizes for hashes: running digest + 8 bytes of message length */
3044 | #define HASH_MSG_LEN 8 |
3045 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) |
3046 | |
3047 | enum hash_optype { |
3048 | UPDATE = 0, |
3049 | UPDATE_FIRST, |
3050 | FINALIZE, |
3051 | DIGEST, |
3052 | HASH_NUM_OP |
3053 | }; |
3054 | |
3055 | /** |
3056 | * struct caam_hash_ctx - ahash per-session context |
3057 | * @flc: Flow Contexts array |
3058 | * @key: authentication key |
3059 | * @flc_dma: I/O virtual addresses of the Flow Contexts |
3060 | * @dev: dpseci device |
3061 | * @ctx_len: size of Context Register |
3062 | * @adata: hashing algorithm details |
3063 | */ |
3064 | struct caam_hash_ctx { |
3065 | struct caam_flc flc[HASH_NUM_OP]; |
3066 | u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; |
3067 | dma_addr_t flc_dma[HASH_NUM_OP]; |
3068 | struct device *dev; |
3069 | int ctx_len; |
3070 | struct alginfo adata; |
3071 | }; |
3072 | |
3073 | /* ahash state */ |
3074 | struct caam_hash_state { |
3075 | struct caam_request caam_req; |
3076 | dma_addr_t buf_dma; |
3077 | dma_addr_t ctx_dma; |
3078 | int ctx_dma_len; |
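	/* buf is DMA mapped, so keep it off the CPU-written cachelines above */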
3079 | u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; |
3080 | int buflen; |
3081 | int next_buflen; |
3082 | u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; |
3083 | int (*update)(struct ahash_request *req); |
3084 | int (*final)(struct ahash_request *req); |
3085 | int (*finup)(struct ahash_request *req); |
3086 | }; |
3087 | |
3088 | struct caam_export_state { |
3089 | u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; |
3090 | u8 caam_ctx[MAX_CTX_LEN]; |
3091 | int buflen; |
3092 | int (*update)(struct ahash_request *req); |
3093 | int (*final)(struct ahash_request *req); |
3094 | int (*finup)(struct ahash_request *req); |
3095 | }; |
3096 | |
3097 | /* Map current buffer in state (if length > 0) and put it in link table */ |
3098 | static inline int buf_map_to_qm_sg(struct device *dev, |
3099 | struct dpaa2_sg_entry *qm_sg, |
3100 | struct caam_hash_state *state) |
3101 | { |
3102 | int buflen = state->buflen; |
3103 | |
3104 | if (!buflen) |
3105 | return 0; |
3106 | |
3107 | state->buf_dma = dma_map_single(dev, state->buf, buflen, |
3108 | DMA_TO_DEVICE); |
	if (dma_mapping_error(dev, state->buf_dma)) {
		dev_err(dev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3116 | |
3117 | return 0; |
3118 | } |
3119 | |
3120 | /* Map state->caam_ctx, and add it to link table */ |
3121 | static inline int ctx_map_to_qm_sg(struct device *dev, |
3122 | struct caam_hash_state *state, int ctx_len, |
3123 | struct dpaa2_sg_entry *qm_sg, u32 flag) |
3124 | { |
3125 | state->ctx_dma_len = ctx_len; |
3126 | state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); |
	if (dma_mapping_error(dev, state->ctx_dma)) {
		dev_err(dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3134 | |
3135 | return 0; |
3136 | } |
3137 | |
3138 | static int ahash_set_sh_desc(struct crypto_ahash *ahash) |
3139 | { |
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	struct caam_flc *flc;
	u32 *desc;

	/* ahash_update shared descriptor */
	flc = &ctx->flc[UPDATE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	flc = &ctx->flc[UPDATE_FIRST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_final shared descriptor */
	flc = &ctx->flc[FINALIZE];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_digest shared descriptor */
	flc = &ctx->flc[DIGEST];
	desc = flc->sh_desc;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
				   desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
3193 | |
3194 | return 0; |
3195 | } |
3196 | |
3197 | struct split_key_sh_result { |
3198 | struct completion completion; |
3199 | int err; |
3200 | struct device *dev; |
3201 | }; |
3202 | |
3203 | static void split_key_sh_done(void *cbk_ctx, u32 err) |
3204 | { |
3205 | struct split_key_sh_result *res = cbk_ctx; |
3206 | |
	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3208 | |
3209 | res->err = err ? caam_qi2_strstatus(res->dev, err) : 0; |
3210 | complete(&res->completion); |
3211 | } |
3212 | |
/* Digest the hash key if it is too large (i.e. longer than the block size) */
3214 | static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, |
3215 | u32 digestsize) |
3216 | { |
3217 | struct caam_request *req_ctx; |
3218 | u32 *desc; |
3219 | struct split_key_sh_result result; |
3220 | dma_addr_t key_dma; |
3221 | struct caam_flc *flc; |
3222 | dma_addr_t flc_dma; |
3223 | int ret = -ENOMEM; |
3224 | struct dpaa2_fl_entry *in_fle, *out_fle; |
3225 | |
	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
3227 | if (!req_ctx) |
3228 | return -ENOMEM; |
3229 | |
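	/* fd_flt[0] carries the output frame list entry, fd_flt[1] the input */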
3230 | in_fle = &req_ctx->fd_flt[1]; |
3231 | out_fle = &req_ctx->fd_flt[0]; |
3232 | |
	flc = kzalloc(sizeof(*flc), GFP_KERNEL);
3234 | if (!flc) |
3235 | goto err_flc; |
3236 | |
3237 | key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL); |
	if (dma_mapping_error(ctx->dev, key_dma)) {
		dev_err(ctx->dev, "unable to map key memory\n");
3240 | goto err_key_dma; |
3241 | } |
3242 | |
3243 | desc = flc->sh_desc; |
3244 | |
	init_sh_desc(desc, 0);

	/* descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
				 desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, flc_dma)) {
		dev_err(ctx->dev, "unable to map shared descriptor\n");
3260 | goto err_flc_dma; |
3261 | } |
3262 | |
3263 | dpaa2_fl_set_final(fle: in_fle, final: true); |
3264 | dpaa2_fl_set_format(fle: in_fle, format: dpaa2_fl_single); |
3265 | dpaa2_fl_set_addr(fle: in_fle, addr: key_dma); |
3266 | dpaa2_fl_set_len(fle: in_fle, len: *keylen); |
3267 | dpaa2_fl_set_format(fle: out_fle, format: dpaa2_fl_single); |
3268 | dpaa2_fl_set_addr(fle: out_fle, addr: key_dma); |
3269 | dpaa2_fl_set_len(fle: out_fle, len: digestsize); |
3270 | |
3271 | print_hex_dump_debug("key_in@" __stringify(__LINE__)": " , |
3272 | DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); |
3273 | print_hex_dump_debug("shdesc@" __stringify(__LINE__)": " , |
3274 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
3275 | 1); |
3276 | |
3277 | result.err = 0; |
3278 | init_completion(x: &result.completion); |
3279 | result.dev = ctx->dev; |
3280 | |
3281 | req_ctx->flc = flc; |
3282 | req_ctx->flc_dma = flc_dma; |
3283 | req_ctx->cbk = split_key_sh_done; |
3284 | req_ctx->ctx = &result; |
3285 | |
3286 | ret = dpaa2_caam_enqueue(dev: ctx->dev, req: req_ctx); |
3287 | if (ret == -EINPROGRESS) { |
3288 | /* in progress */ |
3289 | wait_for_completion(&result.completion); |
3290 | ret = result.err; |
3291 | print_hex_dump_debug("digested key@" __stringify(__LINE__)": " , |
3292 | DUMP_PREFIX_ADDRESS, 16, 4, key, |
3293 | digestsize, 1); |
3294 | } |
3295 | |
3296 | dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), |
3297 | DMA_TO_DEVICE); |
3298 | err_flc_dma: |
3299 | dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL); |
3300 | err_key_dma: |
3301 | kfree(objp: flc); |
3302 | err_flc: |
3303 | kfree(objp: req_ctx); |
3304 | |
3305 | *keylen = digestsize; |
3306 | |
3307 | return ret; |
3308 | } |
3309 | |
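/*
 * ahash setkey callback: per the HMAC construction, a key longer than the
 * block size is first reduced to digestsize bytes with an unkeyed hash
 * (hash_digest_key() above). The key is kept at both a virtual and a DMA
 * address, since DKP<ptr,imm> is needed whenever the user key is longer
 * than the derived split key - see the comment in the function body.
 */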
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ctx->adata.keylen = keylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);
	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
		goto bad_free_key;

	ctx->adata.key_virt = key;
	ctx->adata.key_inline = true;

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	if (keylen > ctx->adata.keylen_pad) {
		memcpy(ctx->key, key, keylen);
		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
					   ctx->adata.keylen_pad,
					   DMA_TO_DEVICE);
	}

	ret = ahash_set_sh_desc(ahash);
	kfree(hashed_key);
	return ret;
bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

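/*
 * DMA unmap helpers for a completed (or aborted) job: the source
 * scatterlist, the QMan S/G table, the buffered data and - in the _ctx
 * variant - the running context.
 */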
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->qm_sg_bytes)
		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
				 DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req);
}

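/*
 * Completion callbacks. They differ in the DMA direction used to unmap the
 * running context and in what gets copied back to the request:
 * - ahash_done: digest-type jobs, the context holds the final digest
 * - ahash_done_bi: update with context both in and out (DMA_BIDIRECTIONAL)
 * - ahash_done_ctx_src: final/finup jobs that consume an existing context
 * - ahash_done_ctx_dst: first update, the context is output only
 */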
static void ahash_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	ahash_request_complete(req, ecode);
}

static void ahash_done_bi(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	ahash_request_complete(req, ecode);
}

static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	qi_cache_free(edesc);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	ahash_request_complete(req, ecode);
}

static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	ahash_request_complete(req, ecode);
}

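/*
 * Hash another chunk of data while carrying the running context through the
 * engine. Only whole blocks are sent to hardware; the remainder is stashed
 * in state->buf for the next call. The input S/G table is laid out as
 * [running context][buffered data][req->src].
 */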
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, src_len,
					 sg_table + qm_sg_src_index, 0);
		} else {
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
		req_ctx->cbk = ahash_done_bi;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

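/*
 * Flush the buffered remainder (if any) together with the running context
 * and retrieve the final digest.
 */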
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc)
		return -ENOMEM;

	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

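/*
 * Like ahash_final_ctx(), but req->src is hashed in the same pass as the
 * buffered data and the running context.
 */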
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
		      sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}

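/*
 * One-shot digest of req->src. A source that maps to a single DMA segment
 * is passed directly as a "single" frame list entry; otherwise a QMan S/G
 * table is built.
 */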
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

	if (mapped_nents > 1) {
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc)
		return ret;

	if (buflen) {
		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
			dev_err(ctx->dev, "unable to map src\n");
			goto unmap;
		}
	}

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	/*
	 * The crypto engine requires the input entry to be present when the
	 * "frame list" FD format is used.
	 * Since the engine does not support FMT=2'b11 (unused entry type),
	 * leaving in_fle zeroized (except for the "Final" flag) is the best
	 * option.
	 */
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

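/*
 * "No context" variants, used while nothing has been sent to the engine yet
 * and hence no running context exists. Once ahash_update_no_ctx() hashes
 * its first chunk, it switches the state handlers over to the *_ctx
 * variants above.
 */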
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int qm_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
		if (ret)
			goto unmap_ctx;

		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret = -ENOMEM;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
	if (ret)
		goto unmap;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		ret = -ENOMEM;
		goto unmap;
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		goto unmap;

	return ret;
unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

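/*
 * First update on a request: there is neither a running context nor
 * buffered data yet, so the UPDATE_FIRST shared descriptor is used and
 * req->src can go in as a single entry when it maps to one segment.
 */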
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		sg_table = &edesc->sgt[0];

		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_len(in_fle, to_hash);

		if (mapped_nents > 1) {
			int qm_sg_bytes;

			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
			qm_sg_bytes = pad_sg_nents(mapped_nents) *
				      sizeof(*sg_table);
			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
							  qm_sg_bytes,
							  DMA_TO_DEVICE);
			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
				dev_err(ctx->dev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto unmap_ctx;
			}
			edesc->qm_sg_bytes = qm_sg_bytes;
			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		} else {
			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
		}

		state->ctx_dma_len = ctx->ctx_len;
		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
						ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
		req_ctx->cbk = ahash_done_ctx_dst;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY && req->base.flags &
		      CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

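/*
 * The update/finup/final pointers in the request state form a small state
 * machine: ahash_init() starts in the "no data hashed yet" state, and
 * ahash_update_first() promotes the handlers to the *_ctx variants once a
 * running context exists. The pointers are part of the exported state, so
 * export/import below preserve the current state.
 */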
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam-qi2",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam-qi2",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam-qi2",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam-qi2",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam-qi2",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam-qi2",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam-qi2",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam-qi2",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam-qi2",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam-qi2",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam-qi2",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam-qi2",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}
};

struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	struct ahash_alg ahash_alg;
};

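/*
 * Per-tfm setup: DMA-map the key buffer (keyed algorithms only) and the
 * flow context array, and derive ctx_len - the size of the MDHA running
 * digest plus the message-length field - from the runninglen[] table.
 */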
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	if (alg->setkey) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  DMA_TO_DEVICE,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
			dev_err(ctx->dev, "unable to map key\n");
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}

static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}

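/* FQ data-available notification: kick the per-cpu NAPI instance */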
static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}

static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}

static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}

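/*
 * Point each Rx (response) queue at the DPIO affine to its CPU, so that
 * responses are demultiplexed per core.
 */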
4759 | static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv) |
4760 | { |
4761 | struct dpseci_rx_queue_cfg rx_queue_cfg; |
4762 | struct device *dev = priv->dev; |
4763 | struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); |
4764 | struct dpaa2_caam_priv_per_cpu *ppriv; |
4765 | int err = 0, i = 0, cpu; |
4766 | |
4767 | /* Configure Rx queues */ |
4768 | for_each_online_cpu(cpu) { |
4769 | ppriv = per_cpu_ptr(priv->ppriv, cpu); |
4770 | |
4771 | rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST | |
4772 | DPSECI_QUEUE_OPT_USER_CTX; |
4773 | rx_queue_cfg.order_preservation_en = 0; |
4774 | rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO; |
4775 | rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; |
4776 | /* |
4777 | * Rx priority (WQ) doesn't really matter, since we use |
4778 | * pull mode, i.e. volatile dequeues from specific FQs |
4779 | */ |
4780 | rx_queue_cfg.dest_cfg.priority = 0; |
4781 | rx_queue_cfg.user_ctx = ppriv->nctx.qman64; |
4782 | |
		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4787 | err); |
4788 | return err; |
4789 | } |
4790 | |
4791 | if (++i == priv->num_pairs) |
4792 | break; |
4793 | } |
4794 | |
4795 | return err; |
4796 | } |
4797 | |
4798 | static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv) |
4799 | { |
4800 | struct device *dev = priv->dev; |
4801 | |
4802 | if (!priv->cscn_mem) |
4803 | return; |
4804 | |
4805 | dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); |
	kfree(priv->cscn_mem);
4807 | } |
4808 | |
4809 | static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) |
4810 | { |
4811 | struct device *dev = priv->dev; |
4812 | struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); |
4813 | int err; |
4814 | |
4815 | if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { |
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
4819 | } |
4820 | |
4821 | dpaa2_dpseci_congestion_free(priv); |
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4823 | } |
4824 | |
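/*
 * Consume one response FD: recover the originating caam_request from
 * FD[ADDR], unmap the frame list and run the request's completion callback
 * with FD[FRC], which carries the SEC status.
 */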
4825 | static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv, |
4826 | const struct dpaa2_fd *fd) |
4827 | { |
4828 | struct caam_request *req; |
4829 | u32 fd_err; |
4830 | |
4831 | if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) { |
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4833 | return; |
4834 | } |
4835 | |
4836 | fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; |
4837 | if (unlikely(fd_err)) |
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4839 | |
4840 | /* |
4841 | * FD[ADDR] is guaranteed to be valid, irrespective of errors reported |
4842 | * in FD[ERR] or FD[FRC]. |
4843 | */ |
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4845 | dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt), |
4846 | DMA_BIDIRECTIONAL); |
4847 | req->cbk(req->ctx, dpaa2_fd_get_frc(fd)); |
4848 | } |
4849 | |
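/* Issue a volatile dequeue command for the response FQ, retrying on busy */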
4850 | static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv) |
4851 | { |
4852 | int err; |
4853 | |
4854 | /* Retry while portal is busy */ |
4855 | do { |
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
4858 | } while (err == -EBUSY); |
4859 | |
4860 | if (unlikely(err)) |
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4862 | |
4863 | return err; |
4864 | } |
4865 | |
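/*
 * Drain the dequeue store, processing every valid frame; returns the number
 * of responses consumed.
 */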
4866 | static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv) |
4867 | { |
4868 | struct dpaa2_dq *dq; |
4869 | int cleaned = 0, is_last; |
4870 | |
4871 | do { |
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4873 | if (unlikely(!dq)) { |
4874 | if (unlikely(!is_last)) { |
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
4878 | /* |
4879 | * MUST retry until we get some sort of |
4880 | * valid response token (be it "empty dequeue" |
4881 | * or a valid frame). |
4882 | */ |
4883 | continue; |
4884 | } |
4885 | break; |
4886 | } |
4887 | |
4888 | /* Process FD */ |
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4890 | cleaned++; |
4891 | } while (!is_last); |
4892 | |
4893 | return cleaned; |
4894 | } |
4895 | |
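/*
 * NAPI poll callback: pull responses from the store until the FQ is empty
 * or the budget is exhausted, then re-arm notifications.
 */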
4896 | static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget) |
4897 | { |
4898 | struct dpaa2_caam_priv_per_cpu *ppriv; |
4899 | struct dpaa2_caam_priv *priv; |
4900 | int err, cleaned = 0, store_cleaned; |
4901 | |
4902 | ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi); |
4903 | priv = ppriv->priv; |
4904 | |
4905 | if (unlikely(dpaa2_caam_pull_fq(ppriv))) |
4906 | return 0; |
4907 | |
4908 | do { |
4909 | store_cleaned = dpaa2_caam_store_consume(ppriv); |
4910 | cleaned += store_cleaned; |
4911 | |
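		/*
		 * Stop if the FQ drained or if another full store (up to
		 * DPAA2_CAAM_STORE_SIZE frames) might overrun the remaining
		 * budget.
		 */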
4912 | if (store_cleaned == 0 || |
4913 | cleaned > budget - DPAA2_CAAM_STORE_SIZE) |
4914 | break; |
4915 | |
4916 | /* Try to dequeue some more */ |
4917 | err = dpaa2_caam_pull_fq(ppriv); |
4918 | if (unlikely(err)) |
4919 | break; |
4920 | } while (1); |
4921 | |
4922 | if (cleaned < budget) { |
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
4927 | err); |
4928 | } |
4929 | |
4930 | return cleaned; |
4931 | } |
4932 | |
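/*
 * Set up a Congestion State Change Notification (CSCN) area that the
 * hardware updates on congestion group entry/exit, allowing the enqueue
 * path to back-pressure requests with -EBUSY while congested.
 */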
4933 | static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, |
4934 | u16 token) |
4935 | { |
4936 | struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 }; |
4937 | struct device *dev = priv->dev; |
4938 | unsigned int alignmask; |
4939 | int err; |
4940 | |
4941 | /* |
4942 | * Congestion group feature supported starting with DPSECI API v5.1 |
4943 | * and only when object has been created with this capability. |
4944 | */ |
4945 | if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) || |
4946 | !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) |
4947 | return 0; |
4948 | |
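	/*
	 * Pad the allocation size so the kmalloc'ed buffer is aligned to both
	 * the CSCN hardware requirement and the DMA cache alignment.
	 */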
4949 | alignmask = DPAA2_CSCN_ALIGN - 1; |
4950 | alignmask |= dma_get_cache_alignment() - 1; |
4951 | priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1), |
4952 | GFP_KERNEL); |
4953 | if (!priv->cscn_mem) |
4954 | return -ENOMEM; |
4955 | |
4956 | priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, |
4957 | DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); |
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
4960 | err = -ENOMEM; |
4961 | goto err_dma_map; |
4962 | } |
4963 | |
4964 | cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES; |
4965 | cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH; |
4966 | cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH; |
4967 | cong_notif_cfg.message_ctx = (uintptr_t)priv; |
4968 | cong_notif_cfg.message_iova = priv->cscn_dma; |
4969 | cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER | |
4970 | DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT | |
4971 | DPSECI_CGN_MODE_COHERENT_WRITE; |
4972 | |
	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4977 | goto err_set_cong; |
4978 | } |
4979 | |
4980 | return 0; |
4981 | |
4982 | err_set_cong: |
4983 | dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); |
4984 | err_dma_map: |
	kfree(priv->cscn_mem);
4986 | |
4987 | return err; |
4988 | } |
4989 | |
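/*
 * Open the DPSECI object, query its attributes and queue configuration, and
 * distribute the Rx/Tx queue pairs across the online CPUs.
 */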
4990 | static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) |
4991 | { |
4992 | struct device *dev = &ls_dev->dev; |
4993 | struct dpaa2_caam_priv *priv; |
4994 | struct dpaa2_caam_priv_per_cpu *ppriv; |
4995 | int err, cpu; |
4996 | u8 i; |
4997 | |
4998 | priv = dev_get_drvdata(dev); |
4999 | |
5000 | priv->dev = dev; |
5001 | priv->dpsec_id = ls_dev->obj_desc.id; |
5002 | |
	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
5007 | goto err_open; |
5008 | } |
5009 | |
	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
5014 | goto err_get_vers; |
5015 | } |
5016 | |
	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5018 | |
5019 | if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { |
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err) {
			dev_err(dev, "dpseci_reset() failed\n");
5023 | goto err_get_vers; |
5024 | } |
5025 | } |
5026 | |
	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
5031 | goto err_get_vers; |
5032 | } |
5033 | |
	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
5038 | goto err_get_vers; |
5039 | } |
5040 | |
	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_congestion_setup() failed\n");
5044 | goto err_get_vers; |
5045 | } |
5046 | |
5047 | priv->num_pairs = min(priv->dpseci_attr.num_rx_queues, |
5048 | priv->dpseci_attr.num_tx_queues); |
5049 | if (priv->num_pairs > num_online_cpus()) { |
		dev_warn(dev, "%d queues won't be used\n",
5051 | priv->num_pairs - num_online_cpus()); |
5052 | priv->num_pairs = num_online_cpus(); |
5053 | } |
5054 | |
5055 | for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) { |
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
5060 | goto err_get_rx_queue; |
5061 | } |
5062 | } |
5063 | |
5064 | for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) { |
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
5069 | goto err_get_rx_queue; |
5070 | } |
5071 | } |
5072 | |
5073 | i = 0; |
5074 | for_each_online_cpu(cpu) { |
5075 | u8 j; |
5076 | |
5077 | j = i % priv->num_pairs; |
5078 | |
5079 | ppriv = per_cpu_ptr(priv->ppriv, cpu); |
5080 | ppriv->req_fqid = priv->tx_queue_attr[j].fqid; |
5081 | |
5082 | /* |
5083 | * Allow all cores to enqueue, while only some of them |
5084 | * will take part in dequeuing. |
5085 | */ |
5086 | if (++i > priv->num_pairs) |
5087 | continue; |
5088 | |
5089 | ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid; |
5090 | ppriv->prio = j; |
5091 | |
		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5093 | priv->rx_queue_attr[j].fqid, |
5094 | priv->tx_queue_attr[j].fqid); |
5095 | |
5096 | ppriv->net_dev.dev = *dev; |
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
					 dpaa2_dpseci_poll,
					 DPAA2_CAAM_NAPI_WEIGHT);
5101 | } |
5102 | |
5103 | return 0; |
5104 | |
5105 | err_get_rx_queue: |
5106 | dpaa2_dpseci_congestion_free(priv); |
5107 | err_get_vers: |
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5109 | err_open: |
5110 | return err; |
5111 | } |
5112 | |
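/* Enable NAPI on every in-use portal, then enable the DPSECI object */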
5113 | static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv) |
5114 | { |
5115 | struct device *dev = priv->dev; |
5116 | struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); |
5117 | struct dpaa2_caam_priv_per_cpu *ppriv; |
5118 | int i; |
5119 | |
5120 | for (i = 0; i < priv->num_pairs; i++) { |
5121 | ppriv = per_cpu_ptr(priv->ppriv, i); |
		napi_enable(&ppriv->napi);
5123 | } |
5124 | |
	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5126 | } |
5127 | |
5128 | static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) |
5129 | { |
5130 | struct device *dev = priv->dev; |
5131 | struct dpaa2_caam_priv_per_cpu *ppriv; |
5132 | struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); |
5133 | int i, err = 0, enabled; |
5134 | |
	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
5138 | return err; |
5139 | } |
5140 | |
	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
5144 | return err; |
5145 | } |
5146 | |
	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5148 | |
5149 | for (i = 0; i < priv->num_pairs; i++) { |
5150 | ppriv = per_cpu_ptr(priv->ppriv, i); |
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
5153 | } |
5154 | |
5155 | return 0; |
5156 | } |
5157 | |
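/* ahash algorithms registered at probe time, unregistered on remove */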
5158 | static struct list_head hash_list; |
5159 | |
5160 | static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) |
5161 | { |
5162 | struct device *dev; |
5163 | struct dpaa2_caam_priv *priv; |
5164 | int i, err = 0; |
5165 | bool registered = false; |
5166 | |
5167 | /* |
5168 | * There is no way to get CAAM endianness - there is no direct register |
5169 | * space access and MC f/w does not provide this attribute. |
5170 | * All DPAA2-based SoCs have little endian CAAM, thus hard-code this |
5171 | * property. |
5172 | */ |
5173 | caam_little_end = true; |
5174 | |
5175 | caam_imx = false; |
5176 | |
5177 | dev = &dpseci_dev->dev; |
5178 | |
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5180 | if (!priv) |
5181 | return -ENOMEM; |
5182 | |
	dev_set_drvdata(dev, priv);
5184 | |
5185 | priv->domain = iommu_get_domain_for_dev(dev); |
5186 | |
	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, 0, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
5191 | return -ENOMEM; |
5192 | } |
5193 | |
5194 | err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); |
5195 | if (err) { |
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5197 | goto err_dma_mask; |
5198 | } |
5199 | |
5200 | /* Obtain a MC portal */ |
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5202 | if (err) { |
5203 | if (err == -ENXIO) |
5204 | err = -EPROBE_DEFER; |
5205 | else |
			dev_err(dev, "MC portal allocation failed\n");
5207 | |
5208 | goto err_dma_mask; |
5209 | } |
5210 | |
5211 | priv->ppriv = alloc_percpu(*priv->ppriv); |
5212 | if (!priv->ppriv) { |
		dev_err(dev, "alloc_percpu() failed\n");
5214 | err = -ENOMEM; |
5215 | goto err_alloc_ppriv; |
5216 | } |
5217 | |
5218 | /* DPSECI initialization */ |
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5222 | goto err_dpseci_setup; |
5223 | } |
5224 | |
5225 | /* DPIO */ |
5226 | err = dpaa2_dpseci_dpio_setup(priv); |
5227 | if (err) { |
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5229 | goto err_dpio_setup; |
5230 | } |
5231 | |
5232 | /* DPSECI binding to DPIO */ |
5233 | err = dpaa2_dpseci_bind(priv); |
5234 | if (err) { |
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5236 | goto err_bind; |
5237 | } |
5238 | |
5239 | /* DPSECI enable */ |
5240 | err = dpaa2_dpseci_enable(priv); |
5241 | if (err) { |
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5243 | goto err_bind; |
5244 | } |
5245 | |
5246 | dpaa2_dpseci_debugfs_init(priv); |
5247 | |
5248 | /* register crypto algorithms the device supports */ |
5249 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
5250 | struct caam_skcipher_alg *t_alg = driver_algs + i; |
5251 | u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; |
5252 | |
5253 | /* Skip DES algorithms if not supported by device */ |
5254 | if (!priv->sec_attr.des_acc_num && |
5255 | (alg_sel == OP_ALG_ALGSEL_3DES || |
5256 | alg_sel == OP_ALG_ALGSEL_DES)) |
5257 | continue; |
5258 | |
5259 | /* Skip AES algorithms if not supported by device */ |
5260 | if (!priv->sec_attr.aes_acc_num && |
5261 | alg_sel == OP_ALG_ALGSEL_AES) |
5262 | continue; |
5263 | |
5264 | /* Skip CHACHA20 algorithms if not supported by device */ |
5265 | if (alg_sel == OP_ALG_ALGSEL_CHACHA20 && |
5266 | !priv->sec_attr.ccha_acc_num) |
5267 | continue; |
5268 | |
5269 | t_alg->caam.dev = dev; |
5270 | caam_skcipher_alg_init(t_alg); |
5271 | |
		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
5275 | t_alg->skcipher.base.cra_driver_name, err); |
5276 | continue; |
5277 | } |
5278 | |
5279 | t_alg->registered = true; |
5280 | registered = true; |
5281 | } |
5282 | |
5283 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { |
5284 | struct caam_aead_alg *t_alg = driver_aeads + i; |
5285 | u32 c1_alg_sel = t_alg->caam.class1_alg_type & |
5286 | OP_ALG_ALGSEL_MASK; |
5287 | u32 c2_alg_sel = t_alg->caam.class2_alg_type & |
5288 | OP_ALG_ALGSEL_MASK; |
5289 | |
5290 | /* Skip DES algorithms if not supported by device */ |
5291 | if (!priv->sec_attr.des_acc_num && |
5292 | (c1_alg_sel == OP_ALG_ALGSEL_3DES || |
5293 | c1_alg_sel == OP_ALG_ALGSEL_DES)) |
5294 | continue; |
5295 | |
5296 | /* Skip AES algorithms if not supported by device */ |
5297 | if (!priv->sec_attr.aes_acc_num && |
5298 | c1_alg_sel == OP_ALG_ALGSEL_AES) |
5299 | continue; |
5300 | |
5301 | /* Skip CHACHA20 algorithms if not supported by device */ |
5302 | if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && |
5303 | !priv->sec_attr.ccha_acc_num) |
5304 | continue; |
5305 | |
5306 | /* Skip POLY1305 algorithms if not supported by device */ |
5307 | if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && |
5308 | !priv->sec_attr.ptha_acc_num) |
5309 | continue; |
5310 | |
5311 | /* |
5312 | * Skip algorithms requiring message digests |
5313 | * if MD not supported by device. |
5314 | */ |
5315 | if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && |
5316 | !priv->sec_attr.md_acc_num) |
5317 | continue; |
5318 | |
5319 | t_alg->caam.dev = dev; |
5320 | caam_aead_alg_init(t_alg); |
5321 | |
		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
5325 | t_alg->aead.base.cra_driver_name, err); |
5326 | continue; |
5327 | } |
5328 | |
5329 | t_alg->registered = true; |
5330 | registered = true; |
5331 | } |
5332 | if (registered) |
		dev_info(dev, "algorithms registered in /proc/crypto\n");
5334 | |
5335 | /* register hash algorithms the device supports */ |
	INIT_LIST_HEAD(&hash_list);
5337 | |
5338 | /* |
5339 | * Skip registration of any hashing algorithms if MD block |
5340 | * is not present. |
5341 | */ |
5342 | if (!priv->sec_attr.md_acc_num) |
5343 | return 0; |
5344 | |
5345 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { |
5346 | struct caam_hash_alg *t_alg; |
5347 | struct caam_hash_template *alg = driver_hash + i; |
5348 | |
5349 | /* register hmac version */ |
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5354 | alg->hmac_driver_name, err); |
5355 | continue; |
5356 | } |
5357 | |
		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
5366 | } |
5367 | |
5368 | /* register unkeyed version */ |
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
5373 | alg->driver_name, err); |
5374 | continue; |
5375 | } |
5376 | |
		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
5385 | } |
5386 | } |
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5389 | |
5390 | return err; |
5391 | |
5392 | err_bind: |
5393 | dpaa2_dpseci_dpio_free(priv); |
5394 | err_dpio_setup: |
5395 | dpaa2_dpseci_free(priv); |
5396 | err_dpseci_setup: |
	free_percpu(priv->ppriv);
5398 | err_alloc_ppriv: |
	fsl_mc_portal_free(priv->mc_io);
5400 | err_dma_mask: |
	kmem_cache_destroy(qi_cache);
5402 | |
5403 | return err; |
5404 | } |
5405 | |
5406 | static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) |
5407 | { |
5408 | struct device *dev; |
5409 | struct dpaa2_caam_priv *priv; |
5410 | int i; |
5411 | |
5412 | dev = &ls_dev->dev; |
5413 | priv = dev_get_drvdata(dev); |
5414 | |
5415 | dpaa2_dpseci_debugfs_exit(priv); |
5416 | |
5417 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { |
5418 | struct caam_aead_alg *t_alg = driver_aeads + i; |
5419 | |
5420 | if (t_alg->registered) |
			crypto_unregister_aead(&t_alg->aead);
5422 | } |
5423 | |
5424 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
5425 | struct caam_skcipher_alg *t_alg = driver_algs + i; |
5426 | |
5427 | if (t_alg->registered) |
			crypto_unregister_skcipher(&t_alg->skcipher);
5429 | } |
5430 | |
5431 | if (hash_list.next) { |
5432 | struct caam_hash_alg *t_hash_alg, *p; |
5433 | |
5434 | list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) { |
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
5438 | } |
5439 | } |
5440 | |
5441 | dpaa2_dpseci_disable(priv); |
5442 | dpaa2_dpseci_dpio_free(priv); |
5443 | dpaa2_dpseci_free(priv); |
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);
5447 | } |
5448 | |
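/**
 * dpaa2_caam_enqueue - enqueue a crypto request towards the DPSECI Tx queue
 * @dev: dpseci device
 * @req: request with the frame list and flow context already prepared
 *
 * Return: -EINPROGRESS if the frame was enqueued, -EBUSY if the congestion
 * group is congested, -EIO on DMA mapping or enqueue failure.
 */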
5449 | int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) |
5450 | { |
5451 | struct dpaa2_fd fd; |
5452 | struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); |
5453 | struct dpaa2_caam_priv_per_cpu *ppriv; |
5454 | int err = 0, i; |
5455 | |
	if (IS_ERR(req))
		return PTR_ERR(req);
5458 | |
5459 | if (priv->cscn_mem) { |
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
5463 | if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { |
			dev_dbg_ratelimited(dev, "Dropping request\n");
5465 | return -EBUSY; |
5466 | } |
5467 | } |
5468 | |
	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5470 | |
5471 | req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt), |
5472 | DMA_BIDIRECTIONAL); |
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5475 | goto err_out; |
5476 | } |
5477 | |
5478 | memset(&fd, 0, sizeof(fd)); |
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);
5483 | |
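	/*
	 * Retry while the portal reports busy, bounded at twice the number
	 * of Tx queues rather than spinning indefinitely.
	 */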
5484 | ppriv = raw_cpu_ptr(priv->ppriv); |
5485 | for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { |
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
5488 | if (err != -EBUSY) |
5489 | break; |
5490 | |
5491 | cpu_relax(); |
5492 | } |
5493 | |
5494 | if (unlikely(err)) { |
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5496 | goto err_out; |
5497 | } |
5498 | |
5499 | return -EINPROGRESS; |
5500 | |
5501 | err_out: |
5502 | dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt), |
5503 | DMA_BIDIRECTIONAL); |
5504 | return -EIO; |
5505 | } |
5506 | EXPORT_SYMBOL(dpaa2_caam_enqueue); |
5507 | |
5508 | static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { |
5509 | { |
5510 | .vendor = FSL_MC_VENDOR_FREESCALE, |
		.obj_type = "dpseci",
5512 | }, |
5513 | { .vendor = 0x0 } |
5514 | }; |
5515 | MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table); |
5516 | |
5517 | static struct fsl_mc_driver dpaa2_caam_driver = { |
5518 | .driver = { |
5519 | .name = KBUILD_MODNAME, |
5520 | .owner = THIS_MODULE, |
5521 | }, |
5522 | .probe = dpaa2_caam_probe, |
5523 | .remove = dpaa2_caam_remove, |
5524 | .match_id_table = dpaa2_caam_match_id_table |
5525 | }; |
5526 | |
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5530 | |
5531 | module_fsl_mc_driver(dpaa2_caam_driver); |
5532 | |