// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"

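/*
 * Partial-hash state exported to (and imported from) the caller by
 * qce_ahash_export()/qce_ahash_import(): the bytes still waiting for a
 * full block, the intermediate digest and the running byte counters.
 */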
struct qce_sha_saved_state {
	u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
	u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
	__be32 byte_count[2];
	unsigned int pending_buflen;
	unsigned int flags;
	u64 count;
	bool first_blk;
};

static LIST_HEAD(ahash_algs);

static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

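/*
 * DMA completion callback: tear down the DMA channel, unmap the source and
 * result scatterlists, copy the intermediate (or, on the last block, final)
 * digest and byte counts out of the result dump, restore the request fields
 * saved in qce_ahash_update()/qce_ahash_final(), and complete the request.
 */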
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result && rctx->last_blk)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}

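/*
 * Back-end handler invoked by the qce core for a queued hash request: map
 * req->src for DMA, map the shared result buffer, set up the DMA descriptors
 * via qce_dma_prep_sgs() with qce_ahash_done() as completion callback, and
 * kick off the crypto engine with qce_start().
 */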
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}

	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}

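/* Reset the request context and seed the digest with the standard IV. */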
static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}

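/*
 * The export/import pair serializes the software request context into
 * struct qce_sha_saved_state so that a partially hashed request can be
 * saved and later resumed on a freshly initialized request.
 */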
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_sha_saved_state *export_state = out;

	memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
	memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
	export_state->byte_count[0] = rctx->byte_count[0];
	export_state->byte_count[1] = rctx->byte_count[1];
	export_state->pending_buflen = rctx->buflen;
	export_state->count = rctx->count;
	export_state->first_blk = rctx->first_blk;
	export_state->flags = rctx->flags;

	return 0;
}

static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	const struct qce_sha_saved_state *import_state = in;

	memset(rctx, 0, sizeof(*rctx));
	rctx->count = import_state->count;
	rctx->buflen = import_state->pending_buflen;
	rctx->first_blk = import_state->first_blk;
	rctx->flags = import_state->flags;
	rctx->byte_count[0] = import_state->byte_count[0];
	rctx->byte_count[1] = import_state->byte_count[1];
	memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
	memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));

	return 0;
}

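/*
 * Buffer incoming data until more than one block is available, then hand the
 * largest whole-block prefix to the engine and hold back the remainder (at
 * least one full block, see the comment below on BAM zero-length transfers)
 * for the next update or for qce_ahash_final(). For example, with SHA-256
 * (64-byte blocks), an update of 100 bytes on an empty buffer DMAs 64 bytes
 * and keeps 36 buffered; an update of 128 bytes DMAs 64 bytes and keeps a
 * full 64-byte block buffered.
 */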
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * If we have data from a previous update, copy it to the temporary
	 * buffer; it will be combined with the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;

	/*
	 * At this point, there is more than one block size of data. If
	 * the available data to transfer is exactly a multiple of block
	 * size, save the last block to be transferred in qce_ahash_final
	 * (with the last block bit set) if this is indeed the end of the
	 * data stream. If not, this saved block will be transferred as
	 * part of the next update. If this block is not held back and this
	 * is indeed the end of the data stream, the digest obtained will be
	 * wrong since qce_ahash_final will see that rctx->buflen is 0 and
	 * return doing nothing, which in turn means that a digest will not
	 * be copied to the destination result buffer. qce_ahash_final
	 * cannot be made to alter this behavior and allowed to proceed if
	 * rctx->buflen is 0 because the crypto engine BAM does not allow
	 * for zero length transfers.
	 */
	if (!hash_later)
		hash_later = blocksize;

	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;
		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is a multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

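/*
 * Hash the bytes still buffered from previous updates as the last block.
 * If nothing is buffered the message was empty, so return the precomputed
 * zero-message hash instead of issuing a zero-length transfer.
 */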
static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen) {
		if (tmpl->hash_zero)
			memcpy(req->result, tmpl->hash_zero,
			       tmpl->alg.ahash.halg.digestsize);
		return 0;
	}

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

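/*
 * One-shot digest: init the context, mark the request as both first and
 * last block, and queue it. Zero-length input is answered directly with
 * the precomputed zero-message hash, since the engine cannot do empty
 * transfers.
 */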
static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	if (!rctx->nbytes_orig) {
		if (tmpl->hash_zero)
			memcpy(req->result, tmpl->hash_zero,
			       tmpl->alg.ahash.halg.digestsize);
		return 0;
	}

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

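/*
 * HMAC setkey: as in RFC 2104, a key no longer than the block size is used
 * as-is (zero padded), while a longer key is first reduced to its digest.
 * The reduction reuses this driver's own sha1-qce/sha256-qce ahash and
 * waits for it synchronously via crypto_wait_req().
 */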
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct crypto_wait wait;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}

static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

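/*
 * Per-algorithm constants used to fill in the crypto_alg template at
 * registration time; one entry per supported hash in ahash_def[] below.
 */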
struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};

static const struct qce_ahash_def ahash_def[] = {
	{
		.flags = QCE_HASH_SHA1,
		.name = "sha1",
		.drv_name = "sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256,
		.name = "sha256",
		.drv_name = "sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha256,
	},
	{
		.flags = QCE_HASH_SHA1_HMAC,
		.name = "hmac(sha1)",
		.drv_name = "hmac-sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256_HMAC,
		.name = "hmac(sha256)",
		.drv_name = "hmac-sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct qce_sha_saved_state),
		.std_iv = std_iv_sha256,
	},
};

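/*
 * Allocate a qce_alg_template for one ahash_def entry, wire up the ahash
 * callbacks (including setkey for the HMAC variants and the precomputed
 * zero-message hashes), register it with the crypto API and track it on
 * ahash_algs for later unregistration.
 */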
static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	if (IS_SHA1(def->flags))
		tmpl->hash_zero = sha1_zero_message_hash;
	else if (IS_SHA256(def->flags))
		tmpl->hash_zero = sha256_zero_message_hash;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}

static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}

const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};