1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Handle async block request by crypto hardware engine. |
4 | * |
5 | * Copyright (C) 2016 Linaro, Inc. |
6 | * |
7 | * Author: Baolin Wang <baolin.wang@linaro.org> |
8 | */ |
9 | |
10 | #include <crypto/internal/aead.h> |
11 | #include <crypto/internal/akcipher.h> |
12 | #include <crypto/internal/engine.h> |
13 | #include <crypto/internal/hash.h> |
14 | #include <crypto/internal/kpp.h> |
15 | #include <crypto/internal/skcipher.h> |
16 | #include <linux/err.h> |
17 | #include <linux/delay.h> |
18 | #include <linux/device.h> |
19 | #include <linux/kernel.h> |
20 | #include <linux/module.h> |
21 | #include <uapi/linux/sched/types.h> |
22 | #include "internal.h" |
23 | |
24 | #define CRYPTO_ENGINE_MAX_QLEN 10 |
25 | |
26 | /* Temporary algorithm flag used to indicate an updated driver. */ |
27 | #define CRYPTO_ALG_ENGINE 0x200 |
28 | |
29 | struct crypto_engine_alg { |
30 | struct crypto_alg base; |
31 | struct crypto_engine_op op; |
32 | }; |
33 | |
34 | /** |
35 | * crypto_finalize_request - finalize one request if the request is done |
36 | * @engine: the hardware engine |
 * @req: the request that needs to be finalized
38 | * @err: error number |
39 | */ |
40 | static void crypto_finalize_request(struct crypto_engine *engine, |
41 | struct crypto_async_request *req, int err) |
42 | { |
43 | unsigned long flags; |
44 | |
45 | /* |
46 | * If hardware cannot enqueue more requests |
47 | * and retry mechanism is not supported |
48 | * make sure we are completing the current request |
49 | */ |
50 | if (!engine->retry_support) { |
51 | spin_lock_irqsave(&engine->queue_lock, flags); |
		if (engine->cur_req == req)
			engine->cur_req = NULL;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
56 | } |
57 | |
58 | lockdep_assert_in_softirq(); |
59 | crypto_request_complete(req, err); |
60 | |
	kthread_queue_work(engine->kworker, &engine->pump_requests);
62 | } |
63 | |
64 | /** |
65 | * crypto_pump_requests - dequeue one request from engine queue to process |
66 | * @engine: the hardware engine |
67 | * @in_kthread: true if we are in the context of the request pump thread |
68 | * |
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
72 | */ |
73 | static void crypto_pump_requests(struct crypto_engine *engine, |
74 | bool in_kthread) |
75 | { |
76 | struct crypto_async_request *async_req, *backlog; |
77 | struct crypto_engine_alg *alg; |
78 | struct crypto_engine_op *op; |
79 | unsigned long flags; |
80 | bool was_busy = false; |
81 | int ret; |
82 | |
83 | spin_lock_irqsave(&engine->queue_lock, flags); |
84 | |
85 | /* Make sure we are not already running a request */ |
86 | if (!engine->retry_support && engine->cur_req) |
87 | goto out; |
88 | |
89 | /* If another context is idling then defer */ |
90 | if (engine->idling) { |
		kthread_queue_work(engine->kworker, &engine->pump_requests);
92 | goto out; |
93 | } |
94 | |
95 | /* Check if the engine queue is idle */ |
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
97 | if (!engine->busy) |
98 | goto out; |
99 | |
100 | /* Only do teardown in the thread */ |
101 | if (!in_kthread) { |
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
104 | goto out; |
105 | } |
106 | |
107 | engine->busy = false; |
108 | engine->idling = true; |
		spin_unlock_irqrestore(&engine->queue_lock, flags);
110 | |
111 | if (engine->unprepare_crypt_hardware && |
112 | engine->unprepare_crypt_hardware(engine)) |
113 | dev_err(engine->dev, "failed to unprepare crypt hardware\n" ); |
114 | |
115 | spin_lock_irqsave(&engine->queue_lock, flags); |
116 | engine->idling = false; |
117 | goto out; |
118 | } |
119 | |
120 | start_request: |
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
124 | if (!async_req) |
125 | goto out; |
126 | |
127 | /* |
128 | * If hardware doesn't support the retry mechanism, |
129 | * keep track of the request we are processing now. |
130 | * We'll need it on completion (crypto_finalize_request). |
131 | */ |
132 | if (!engine->retry_support) |
133 | engine->cur_req = async_req; |
134 | |
135 | if (engine->busy) |
136 | was_busy = true; |
137 | else |
138 | engine->busy = true; |
139 | |
	spin_unlock_irqrestore(&engine->queue_lock, flags);
141 | |
	/* At this point we have a request that can be handed to the hardware */
143 | if (!was_busy && engine->prepare_crypt_hardware) { |
144 | ret = engine->prepare_crypt_hardware(engine); |
145 | if (ret) { |
146 | dev_err(engine->dev, "failed to prepare crypt hardware\n" ); |
147 | goto req_err_1; |
148 | } |
149 | } |
150 | |
151 | if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) { |
152 | alg = container_of(async_req->tfm->__crt_alg, |
153 | struct crypto_engine_alg, base); |
154 | op = &alg->op; |
155 | } else { |
156 | dev_err(engine->dev, "failed to do request\n" ); |
157 | ret = -EINVAL; |
158 | goto req_err_1; |
159 | } |
160 | |
161 | ret = op->do_one_request(engine, async_req); |
162 | |
163 | /* Request unsuccessfully executed by hardware */ |
164 | if (ret < 0) { |
165 | /* |
166 | * If hardware queue is full (-ENOSPC), requeue request |
167 | * regardless of backlog flag. |
168 | * Otherwise, unprepare and complete the request. |
169 | */ |
		if (!engine->retry_support || (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
175 | goto req_err_1; |
176 | } |
177 | spin_lock_irqsave(&engine->queue_lock, flags); |
178 | /* |
179 | * If hardware was unable to execute request, enqueue it |
180 | * back in front of crypto-engine queue, to keep the order |
181 | * of requests. |
182 | */ |
		crypto_enqueue_request_head(&engine->queue, async_req);
184 | |
		kthread_queue_work(engine->kworker, &engine->pump_requests);
186 | goto out; |
187 | } |
188 | |
189 | goto retry; |
190 | |
191 | req_err_1: |
	crypto_request_complete(async_req, ret);
193 | |
194 | retry: |
195 | if (backlog) |
		crypto_request_complete(backlog, -EINPROGRESS);
197 | |
198 | /* If retry mechanism is supported, send new requests to engine */ |
199 | if (engine->retry_support) { |
200 | spin_lock_irqsave(&engine->queue_lock, flags); |
201 | goto start_request; |
202 | } |
203 | return; |
204 | |
205 | out: |
	spin_unlock_irqrestore(&engine->queue_lock, flags);
207 | |
208 | /* |
209 | * Batch requests is possible only if |
210 | * hardware can enqueue multiple requests |
211 | */ |
212 | if (engine->do_batch_requests) { |
213 | ret = engine->do_batch_requests(engine); |
214 | if (ret) |
215 | dev_err(engine->dev, "failed to do batch requests: %d\n" , |
216 | ret); |
217 | } |
218 | |
219 | return; |
220 | } |
221 | |
222 | static void crypto_pump_work(struct kthread_work *work) |
223 | { |
224 | struct crypto_engine *engine = |
225 | container_of(work, struct crypto_engine, pump_requests); |
226 | |
	crypto_pump_requests(engine, true);
228 | } |
229 | |
230 | /** |
231 | * crypto_transfer_request - transfer the new request into the engine queue |
232 | * @engine: the hardware engine |
 * @req: the request that needs to be added to the engine queue
 * @need_pump: indicates whether to queue the request pump as kthread work
235 | */ |
236 | static int crypto_transfer_request(struct crypto_engine *engine, |
237 | struct crypto_async_request *req, |
238 | bool need_pump) |
239 | { |
240 | unsigned long flags; |
241 | int ret; |
242 | |
243 | spin_lock_irqsave(&engine->queue_lock, flags); |
244 | |
245 | if (!engine->running) { |
		spin_unlock_irqrestore(&engine->queue_lock, flags);
247 | return -ESHUTDOWN; |
248 | } |
249 | |
	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
256 | return ret; |
257 | } |
258 | |
259 | /** |
 * crypto_transfer_request_to_engine - transfer one request into
 * the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
264 | */ |
265 | static int crypto_transfer_request_to_engine(struct crypto_engine *engine, |
266 | struct crypto_async_request *req) |
267 | { |
	return crypto_transfer_request(engine, req, true);
269 | } |
270 | |
271 | /** |
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
276 | */ |
277 | int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, |
278 | struct aead_request *req) |
279 | { |
	return crypto_transfer_request_to_engine(engine, &req->base);
281 | } |
282 | EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine); |
283 | |
284 | /** |
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
289 | */ |
290 | int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, |
291 | struct akcipher_request *req) |
292 | { |
	return crypto_transfer_request_to_engine(engine, &req->base);
294 | } |
295 | EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine); |
296 | |
297 | /** |
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
302 | */ |
303 | int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, |
304 | struct ahash_request *req) |
305 | { |
	return crypto_transfer_request_to_engine(engine, &req->base);
307 | } |
308 | EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine); |
309 | |
310 | /** |
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
315 | */ |
316 | int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine, |
317 | struct kpp_request *req) |
318 | { |
	return crypto_transfer_request_to_engine(engine, &req->base);
320 | } |
321 | EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine); |
322 | |
323 | /** |
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
328 | */ |
329 | int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine, |
330 | struct skcipher_request *req) |
331 | { |
	return crypto_transfer_request_to_engine(engine, &req->base);
333 | } |
334 | EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine); |
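
/*
 * A minimal usage sketch (hypothetical driver; the foo_* names are
 * assumptions, not part of this file): an skcipher .encrypt callback
 * typically just hands the request over to the engine and returns the
 * transfer status (normally -EINPROGRESS or -EBUSY) to the caller:
 *
 *	static int foo_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct foo_dev *fdev = foo_get_dev(req);
 *
 *		return crypto_transfer_skcipher_request_to_engine(fdev->engine,
 *								  req);
 *	}
 */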
335 | |
336 | /** |
337 | * crypto_finalize_aead_request - finalize one aead_request if |
338 | * the request is done |
339 | * @engine: the hardware engine |
 * @req: the request that needs to be finalized
341 | * @err: error number |
342 | */ |
343 | void crypto_finalize_aead_request(struct crypto_engine *engine, |
344 | struct aead_request *req, int err) |
345 | { |
	return crypto_finalize_request(engine, &req->base, err);
347 | } |
348 | EXPORT_SYMBOL_GPL(crypto_finalize_aead_request); |
349 | |
350 | /** |
351 | * crypto_finalize_akcipher_request - finalize one akcipher_request if |
352 | * the request is done |
353 | * @engine: the hardware engine |
 * @req: the request that needs to be finalized
355 | * @err: error number |
356 | */ |
357 | void crypto_finalize_akcipher_request(struct crypto_engine *engine, |
358 | struct akcipher_request *req, int err) |
359 | { |
	return crypto_finalize_request(engine, &req->base, err);
361 | } |
362 | EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request); |
363 | |
364 | /** |
365 | * crypto_finalize_hash_request - finalize one ahash_request if |
366 | * the request is done |
367 | * @engine: the hardware engine |
 * @req: the request that needs to be finalized
369 | * @err: error number |
370 | */ |
371 | void crypto_finalize_hash_request(struct crypto_engine *engine, |
372 | struct ahash_request *req, int err) |
373 | { |
	return crypto_finalize_request(engine, &req->base, err);
375 | } |
376 | EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); |
377 | |
378 | /** |
379 | * crypto_finalize_kpp_request - finalize one kpp_request if the request is done |
380 | * @engine: the hardware engine |
 * @req: the request that needs to be finalized
382 | * @err: error number |
383 | */ |
384 | void crypto_finalize_kpp_request(struct crypto_engine *engine, |
385 | struct kpp_request *req, int err) |
386 | { |
	return crypto_finalize_request(engine, &req->base, err);
388 | } |
389 | EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request); |
390 | |
391 | /** |
392 | * crypto_finalize_skcipher_request - finalize one skcipher_request if |
393 | * the request is done |
394 | * @engine: the hardware engine |
 * @req: the request that needs to be finalized
396 | * @err: error number |
397 | */ |
398 | void crypto_finalize_skcipher_request(struct crypto_engine *engine, |
399 | struct skcipher_request *req, int err) |
400 | { |
	return crypto_finalize_request(engine, &req->base, err);
402 | } |
403 | EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request); |
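
/*
 * A minimal completion sketch (hypothetical driver; the foo_* names are
 * assumptions, not part of this file): once the hardware signals that
 * the current request is done, e.g. in the driver's interrupt bottom
 * half, the driver reports the result back through the engine, which
 * also re-pumps the queue:
 *
 *	static void foo_done_task(unsigned long data)
 *	{
 *		struct foo_dev *fdev = (struct foo_dev *)data;
 *		int err = foo_read_status(fdev);
 *
 *		crypto_finalize_skcipher_request(fdev->engine, fdev->req, err);
 *	}
 */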
404 | |
405 | /** |
406 | * crypto_engine_start - start the hardware engine |
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else a negative error code.
410 | */ |
411 | int crypto_engine_start(struct crypto_engine *engine) |
412 | { |
413 | unsigned long flags; |
414 | |
415 | spin_lock_irqsave(&engine->queue_lock, flags); |
416 | |
417 | if (engine->running || engine->busy) { |
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
426 | |
427 | return 0; |
428 | } |
429 | EXPORT_SYMBOL_GPL(crypto_engine_start); |
430 | |
431 | /** |
432 | * crypto_engine_stop - stop the hardware engine |
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else a negative error code.
436 | */ |
437 | int crypto_engine_stop(struct crypto_engine *engine) |
438 | { |
439 | unsigned long flags; |
440 | unsigned int limit = 500; |
441 | int ret = 0; |
442 | |
443 | spin_lock_irqsave(&engine->queue_lock, flags); |
444 | |
445 | /* |
446 | * If the engine queue is not empty or the engine is on busy state, |
447 | * we need to wait for a while to pump the requests of engine queue. |
448 | */ |
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
452 | spin_lock_irqsave(&engine->queue_lock, flags); |
453 | } |
454 | |
	if (crypto_queue_len(&engine->queue) || engine->busy)
456 | ret = -EBUSY; |
457 | else |
458 | engine->running = false; |
459 | |
	spin_unlock_irqrestore(&engine->queue_lock, flags);
461 | |
462 | if (ret) |
		dev_warn(engine->dev, "could not stop engine\n");
464 | |
465 | return ret; |
466 | } |
467 | EXPORT_SYMBOL_GPL(crypto_engine_stop); |
468 | |
469 | /** |
470 | * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure |
471 | * and initialize it by setting the maximum number of entries in the software |
472 | * crypto-engine queue. |
 * @dev: the device the hardware engine is attached to
 * @retry_support: whether the hardware supports the retry mechanism
475 | * @cbk_do_batch: pointer to a callback function to be invoked when executing |
476 | * a batch of requests. |
477 | * This has the form: |
478 | * callback(struct crypto_engine *engine) |
479 | * where: |
480 | * engine: the crypto engine structure. |
481 | * @rt: whether this queue is set to run as a realtime task |
482 | * @qlen: maximum size of the crypto-engine queue |
483 | * |
484 | * This must be called from context that can sleep. |
485 | * Return: the crypto engine structure on success, else NULL. |
486 | */ |
487 | struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, |
488 | bool retry_support, |
489 | int (*cbk_do_batch)(struct crypto_engine *engine), |
490 | bool rt, int qlen) |
491 | { |
492 | struct crypto_engine *engine; |
493 | |
494 | if (!dev) |
495 | return NULL; |
496 | |
	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
498 | if (!engine) |
499 | return NULL; |
500 | |
501 | engine->dev = dev; |
502 | engine->rt = rt; |
503 | engine->running = false; |
504 | engine->busy = false; |
505 | engine->idling = false; |
506 | engine->retry_support = retry_support; |
507 | engine->priv_data = dev; |
508 | /* |
509 | * Batch requests is possible only if |
510 | * hardware has support for retry mechanism. |
511 | */ |
512 | engine->do_batch_requests = retry_support ? cbk_do_batch : NULL; |
513 | |
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));
516 | |
	crypto_init_queue(&engine->queue, qlen);
518 | spin_lock_init(&engine->queue_lock); |
519 | |
	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
523 | return NULL; |
524 | } |
525 | kthread_init_work(&engine->pump_requests, crypto_pump_work); |
526 | |
527 | if (engine->rt) { |
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
530 | } |
531 | |
532 | return engine; |
533 | } |
534 | EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set); |
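
/*
 * A minimal probe-time sketch (hypothetical driver; the foo_* names and
 * the queue length of 16 are assumptions, not part of this file):
 * allocate an engine with retry and batch support, then start it:
 *
 *	fdev->engine = crypto_engine_alloc_init_and_set(dev, true,
 *							foo_do_batch,
 *							false, 16);
 *	if (!fdev->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(fdev->engine);
 *	if (ret) {
 *		crypto_engine_exit(fdev->engine);
 *		return ret;
 *	}
 */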
535 | |
536 | /** |
537 | * crypto_engine_alloc_init - allocate crypto hardware engine structure and |
538 | * initialize it. |
 * @dev: the device the hardware engine is attached to
540 | * @rt: whether this queue is set to run as a realtime task |
541 | * |
542 | * This must be called from context that can sleep. |
543 | * Return: the crypto engine structure on success, else NULL. |
544 | */ |
545 | struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) |
546 | { |
547 | return crypto_engine_alloc_init_and_set(dev, false, NULL, rt, |
548 | CRYPTO_ENGINE_MAX_QLEN); |
549 | } |
550 | EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); |
551 | |
552 | /** |
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
555 | */ |
556 | void crypto_engine_exit(struct crypto_engine *engine) |
557 | { |
558 | int ret; |
559 | |
560 | ret = crypto_engine_stop(engine); |
561 | if (ret) |
562 | return; |
563 | |
	kthread_destroy_worker(engine->kworker);
565 | } |
566 | EXPORT_SYMBOL_GPL(crypto_engine_exit); |
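
/*
 * A minimal teardown sketch (hypothetical driver; foo_algs and fdev are
 * assumptions, not part of this file): unregister the algorithms first
 * so no new requests arrive, then tear down the engine:
 *
 *	crypto_engine_unregister_skciphers(foo_algs, ARRAY_SIZE(foo_algs));
 *	crypto_engine_exit(fdev->engine);
 */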
567 | |
568 | int crypto_engine_register_aead(struct aead_engine_alg *alg) |
569 | { |
570 | if (!alg->op.do_one_request) |
571 | return -EINVAL; |
572 | |
573 | alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; |
574 | |
	return crypto_register_aead(&alg->base);
576 | } |
577 | EXPORT_SYMBOL_GPL(crypto_engine_register_aead); |
578 | |
579 | void crypto_engine_unregister_aead(struct aead_engine_alg *alg) |
580 | { |
	crypto_unregister_aead(&alg->base);
582 | } |
583 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead); |
584 | |
585 | int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count) |
586 | { |
587 | int i, ret; |
588 | |
589 | for (i = 0; i < count; i++) { |
590 | ret = crypto_engine_register_aead(&algs[i]); |
591 | if (ret) |
592 | goto err; |
593 | } |
594 | |
595 | return 0; |
596 | |
597 | err: |
	crypto_engine_unregister_aeads(algs, i);
599 | |
600 | return ret; |
601 | } |
602 | EXPORT_SYMBOL_GPL(crypto_engine_register_aeads); |
603 | |
604 | void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count) |
605 | { |
606 | int i; |
607 | |
608 | for (i = count - 1; i >= 0; --i) |
609 | crypto_engine_unregister_aead(&algs[i]); |
610 | } |
611 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads); |
612 | |
613 | int crypto_engine_register_ahash(struct ahash_engine_alg *alg) |
614 | { |
615 | if (!alg->op.do_one_request) |
616 | return -EINVAL; |
617 | |
618 | alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE; |
619 | |
	return crypto_register_ahash(&alg->base);
621 | } |
622 | EXPORT_SYMBOL_GPL(crypto_engine_register_ahash); |
623 | |
624 | void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg) |
625 | { |
	crypto_unregister_ahash(&alg->base);
627 | } |
628 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash); |
629 | |
630 | int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count) |
631 | { |
632 | int i, ret; |
633 | |
634 | for (i = 0; i < count; i++) { |
635 | ret = crypto_engine_register_ahash(&algs[i]); |
636 | if (ret) |
637 | goto err; |
638 | } |
639 | |
640 | return 0; |
641 | |
642 | err: |
	crypto_engine_unregister_ahashes(algs, i);
644 | |
645 | return ret; |
646 | } |
647 | EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes); |
648 | |
649 | void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs, |
650 | int count) |
651 | { |
652 | int i; |
653 | |
654 | for (i = count - 1; i >= 0; --i) |
655 | crypto_engine_unregister_ahash(&algs[i]); |
656 | } |
657 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes); |
658 | |
659 | int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg) |
660 | { |
661 | if (!alg->op.do_one_request) |
662 | return -EINVAL; |
663 | |
664 | alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; |
665 | |
	return crypto_register_akcipher(&alg->base);
667 | } |
668 | EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher); |
669 | |
670 | void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg) |
671 | { |
	crypto_unregister_akcipher(&alg->base);
673 | } |
674 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher); |
675 | |
676 | int crypto_engine_register_kpp(struct kpp_engine_alg *alg) |
677 | { |
678 | if (!alg->op.do_one_request) |
679 | return -EINVAL; |
680 | |
681 | alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; |
682 | |
	return crypto_register_kpp(&alg->base);
684 | } |
685 | EXPORT_SYMBOL_GPL(crypto_engine_register_kpp); |
686 | |
687 | void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg) |
688 | { |
	crypto_unregister_kpp(&alg->base);
690 | } |
691 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp); |
692 | |
693 | int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg) |
694 | { |
695 | if (!alg->op.do_one_request) |
696 | return -EINVAL; |
697 | |
698 | alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; |
699 | |
	return crypto_register_skcipher(&alg->base);
701 | } |
702 | EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher); |
703 | |
704 | void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg) |
705 | { |
	crypto_unregister_skcipher(&alg->base);
707 | } |
708 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher); |
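
/*
 * A minimal registration sketch (hypothetical driver; the foo_* names
 * are assumptions, not part of this file): an engine-aware algorithm
 * bundles the usual skcipher_alg with the engine op that the request
 * pump invokes:
 *
 *	static struct skcipher_engine_alg foo_cbc_aes = {
 *		.base = {
 *			// usual struct skcipher_alg fields
 *		},
 *		.op = {
 *			.do_one_request = foo_do_one_request,
 *		},
 *	};
 *
 *	err = crypto_engine_register_skcipher(&foo_cbc_aes);
 */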
709 | |
710 | int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs, |
711 | int count) |
712 | { |
713 | int i, ret; |
714 | |
715 | for (i = 0; i < count; i++) { |
716 | ret = crypto_engine_register_skcipher(&algs[i]); |
717 | if (ret) |
718 | goto err; |
719 | } |
720 | |
721 | return 0; |
722 | |
723 | err: |
	crypto_engine_unregister_skciphers(algs, i);
725 | |
726 | return ret; |
727 | } |
728 | EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers); |
729 | |
730 | void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs, |
731 | int count) |
732 | { |
733 | int i; |
734 | |
735 | for (i = count - 1; i >= 0; --i) |
736 | crypto_engine_unregister_skcipher(&algs[i]); |
737 | } |
738 | EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers); |
739 | |
740 | MODULE_LICENSE("GPL" ); |
741 | MODULE_DESCRIPTION("Crypto hardware engine framework" ); |
742 | |