// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#include <crypto/internal/engine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_engine_alg akcipher;
	bool registered;
};

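/*
 * rsa_io_unmap - DMA-unmap the request's source/destination scatterlists
 * and, if one was used, the hardware S/G table mapped by rsa_edesc_alloc().
 */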
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

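/*
 * The rsa_*_unmap() helpers below undo the DMA mappings of the key material
 * referenced from the Protocol Data Block (PDB): (n, e) for the public
 * operation, (n, d) for private key form 1, (d, p, q, tmp1, tmp2) for form 2
 * and the CRT quantities (p, q, dP, dQ, qInv, tmp1, tmp2) for form 3.
 */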
static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zeros to strip
 *                                from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

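/*
 * rsa_edesc_alloc - Allocate and fill an extended descriptor: strip or
 * zero-pad the input to the modulus size, DMA-map the source/destination
 * scatterlists and, when more than one segment (or padding) is involved,
 * build and map a sec4 hardware S/G table right after the descriptor.
 */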
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

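/*
 * akcipher_do_one_req - crypto-engine callback: enqueue the already prepared
 * job descriptor to the job ring; completion is signalled through
 * req_ctx->akcipher_op_done from the job ring interrupt path.
 */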
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

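/*
 * The set_rsa_*_pdb() helpers below DMA-map the key components and fill in
 * the Protocol Data Block consumed by the CAAM RSA protocol operation,
 * pointing f/g either at a directly mapped segment or at the hw S/G table.
 */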
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;

	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

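/*
 * akcipher_enqueue_req - Submit the job descriptor either directly to the
 * job ring or, for backloggable requests, via the crypto engine; on
 * submission failure all per-request DMA mappings are undone here.
 */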
static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM, if free, especially since the JR has
	 * up to 1024 entries (more than the 10 entries of the crypto engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

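/*
 * caam_rsa_enc - Raw RSA public-key operation (c = m^e mod n); padding
 * schemes such as PKCS#1 are applied by wrapping templates at the crypto
 * API level, not by this driver.
 */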
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

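/*
 * caam_rsa_dec - Raw RSA private-key operation; dispatches to the most
 * capable private key form available: CRT form 3, then form 2, falling back
 * to the plain (n, d) form 1.
 */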
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

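/* Zeroize and free all private key material; e and n are not secret. */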
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than corresponding p, q length, as the
 * BER-encoding requires that the minimum number of bytes be used to encode the
 * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
 * length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_KERNEL);
}

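/*
 * Reject moduli larger than 4096 bits, matching CAAM_RSA_MAX_INPUT_SIZE
 * (512 bytes) used to size the zero-padding buffer.
 */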
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

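/*
 * Select the private key form used for the RSA decrypt protocol: form 1 is
 * (n, d), form 2 adds the primes (d, p, q) and form 3 is the CRT variant
 * (p, q, dP, dQ, qInv). The key is upgraded from form 1 to form 2 or 3 only
 * when all additionally needed components can be allocated; otherwise the
 * function falls back silently to the simpler form.
 */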
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;
	unsigned int aligned_size;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
	rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
	rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher.base = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
				       CRYPTO_DMA_PADDING,
		},
	},
	.akcipher.op = {
		.do_one_request = akcipher_do_one_req,
	},
};
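
/*
 * Illustrative usage sketch (not part of this driver): a kernel user reaches
 * this implementation through the generic akcipher API, e.g.:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_akcipher_set_pub_key(tfm, key, keylen);
 *		...
 *		crypto_free_akcipher(tfm);
 *	}
 */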

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;
	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If this is the
		 * case, the number is non-zero, but this bit is set to indicate that
		 * no encryption or decryption is supported. Only signing and verifying
		 * is supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_engine_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_engine_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}