// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/completion.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
EXPORT_SYMBOL_GPL(__crypto_boot_test_finished);
#endif

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);

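/*
 * crypto_mod_get - Take a reference on an algorithm and its module
 * @alg: The algorithm to reference
 *
 * The providing module is pinned before the algorithm refcount is
 * taken, so the implementation cannot be unloaded while the reference
 * is held. Returns NULL if the module is already on its way out.
 */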
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

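/*
 * crypto_mod_put - Drop a reference taken with crypto_mod_get()
 * @alg: The algorithm to release
 *
 * The module reference is dropped last: releasing the final algorithm
 * reference may run a destructor that lives in the module.
 */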
void crypto_mod_put(struct crypto_alg *alg)
{
	struct module *module = alg->cra_module;

	crypto_alg_put(alg);
	module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);

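/*
 * Scan crypto_alg_list for @name; the caller must hold crypto_alg_sem.
 * An exact cra_driver_name match wins immediately, otherwise the
 * highest-priority cra_name match is returned. Larvals registered with
 * a different mask are skipped so that lookups with incompatible type
 * constraints do not share a placeholder.
 */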
static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
					      u32 mask)
{
	struct crypto_alg *q, *alg = NULL;
	int best = -2;

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int exact, fuzzy;

		if (crypto_is_moribund(q))
			continue;

		if ((q->cra_flags ^ type) & mask)
			continue;

		if (crypto_is_larval(q) &&
		    !crypto_is_test_larval((struct crypto_larval *)q) &&
		    ((struct crypto_larval *)q)->mask != mask)
			continue;

		exact = !strcmp(q->cra_driver_name, name);
		fuzzy = !strcmp(q->cra_name, name);
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		best = q->cra_priority;
		if (alg)
			crypto_mod_put(alg);
		alg = q;

		if (exact)
			break;
	}

	return alg;
}

static void crypto_larval_destroy(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	BUG_ON(!crypto_is_larval(alg));
	if (!IS_ERR_OR_NULL(larval->adult))
		crypto_mod_put(larval->adult);
	kfree(larval);
}

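/*
 * crypto_larval_alloc - Allocate a larval algorithm
 * @name: Name of the algorithm the larval stands in for
 * @type: Type of the larval
 * @mask: Mask used when looking the algorithm up
 *
 * A larval is a placeholder that occupies @name on crypto_alg_list
 * while the real ("adult") algorithm is being loaded or tested;
 * waiters block on its completion until it matures.
 */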
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
	struct crypto_larval *larval;

	larval = kzalloc(sizeof(*larval), GFP_KERNEL);
	if (!larval)
		return ERR_PTR(-ENOMEM);

	larval->mask = mask;
	larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
	larval->alg.cra_priority = -1;
	larval->alg.cra_destroy = crypto_larval_destroy;

	strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
	init_completion(&larval->completion);

	return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);

static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
					    u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_larval *larval;

	larval = crypto_larval_alloc(name, type, mask);
	if (IS_ERR(larval))
		return ERR_CAST(larval);

	refcount_set(&larval->alg.cra_refcnt, 2);

	down_write(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	if (!alg) {
		alg = &larval->alg;
		list_add(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	if (alg != &larval->alg) {
		kfree(larval);
		if (crypto_is_larval(alg))
			alg = crypto_larval_wait(alg);
	}

	return alg;
}

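/*
 * crypto_larval_kill - Unlist a larval and wake all of its waiters
 * @alg: The larval algorithm
 *
 * Drops the list reference taken in crypto_larval_add(). Waiters read
 * larval->adult to learn the outcome of the probe.
 */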
void crypto_larval_kill(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	down_write(&crypto_alg_sem);
	list_del(&alg->cra_list);
	up_write(&crypto_alg_sem);
	complete_all(&larval->completion);
	crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);

void crypto_wait_for_test(struct crypto_larval *larval)
{
	int err;

	err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
	if (WARN_ON_ONCE(err != NOTIFY_STOP))
		goto out;

	err = wait_for_completion_killable(&larval->completion);
	WARN_ON(err);
out:
	crypto_larval_kill(&larval->alg);
}
EXPORT_SYMBOL_GPL(crypto_wait_for_test);

static void crypto_start_test(struct crypto_larval *larval)
{
	if (!crypto_is_test_larval(larval))
		return;

	if (larval->test_started)
		return;

	down_write(&crypto_alg_sem);
	if (larval->test_started) {
		up_write(&crypto_alg_sem);
		return;
	}

	larval->test_started = true;
	up_write(&crypto_alg_sem);

	crypto_wait_for_test(larval);
}

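/*
 * Wait for a larval to mature, for at most 60 seconds. Returns the
 * adult algorithm with a reference held, or an error pointer: -EINTR
 * on a fatal signal, -ETIMEDOUT on expiry, -ENOENT if no adult ever
 * materialised, and -EAGAIN when the lookup should be retried (the
 * algorithm is still untested, is FIPS-internal, or its module is
 * going away).
 */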
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;
	long timeout;

	if (!crypto_boot_test_finished())
		crypto_start_test(larval);

	timeout = wait_for_completion_killable_timeout(
		&larval->completion, 60 * HZ);

	alg = larval->adult;
	if (timeout < 0)
		alg = ERR_PTR(-EINTR);
	else if (!timeout)
		alg = ERR_PTR(-ETIMEDOUT);
	else if (!alg)
		alg = ERR_PTR(-ENOENT);
	else if (IS_ERR(alg))
		;
	else if (crypto_is_test_larval(larval) &&
		 !(alg->cra_flags & CRYPTO_ALG_TESTED))
		alg = ERR_PTR(-EAGAIN);
	else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)
		alg = ERR_PTR(-EAGAIN);
	else if (!crypto_mod_get(alg))
		alg = ERR_PTR(-EAGAIN);
	crypto_mod_put(&larval->alg);

	return alg;
}

static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
					    u32 mask)
{
	const u32 fips = CRYPTO_ALG_FIPS_INTERNAL;
	struct crypto_alg *alg;
	u32 test = 0;

	if (!((type | mask) & CRYPTO_ALG_TESTED))
		test |= CRYPTO_ALG_TESTED;

	down_read(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, (type | test) & ~fips,
				  (mask | test) & ~fips);
	if (alg) {
		if (((type | mask) ^ fips) & fips)
			mask |= fips;
		mask &= fips;

		if (!crypto_is_larval(alg) &&
		    ((type ^ alg->cra_flags) & mask)) {
			/* Algorithm is disallowed in FIPS mode. */
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
	} else if (test) {
		alg = __crypto_alg_lookup(name, type, mask);
		if (alg && !crypto_is_larval(alg)) {
			/* Test failed */
			crypto_mod_put(alg);
			alg = ERR_PTR(-ELIBBAD);
		}
	}
	up_read(&crypto_alg_sem);

	return alg;
}

static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
					       u32 mask)
{
	struct crypto_alg *alg;

	if (!name)
		return ERR_PTR(-ENOENT);

	type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);

	alg = crypto_alg_lookup(name, type, mask);
	if (!alg && !(mask & CRYPTO_NOLOAD)) {
		request_module("crypto-%s", name);

		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
		      CRYPTO_ALG_NEED_FALLBACK))
			request_module("crypto-%s-all", name);

		alg = crypto_alg_lookup(name, type, mask);
	}

	if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
		alg = crypto_larval_wait(alg);
	else if (!alg)
		alg = crypto_larval_add(name, type, mask);

	return alg;
}

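/*
 * crypto_probing_notify - Notify the crypto notifier chain, loading the
 * crypto manager on demand
 * @val: Notification event (CRYPTO_MSG_*)
 * @v: Event payload
 *
 * If no listener handles the event, cryptomgr is loaded and the
 * notification is sent once more.
 */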
int crypto_probing_notify(unsigned long val, void *v)
{
	int ok;

	ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	if (ok == NOTIFY_DONE) {
		request_module("cryptomgr");
		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	}

	return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);

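/*
 * crypto_alg_mod_lookup - Look up an algorithm, loading modules and
 * asking the crypto manager to instantiate templates as needed
 * @name: Name of the algorithm
 * @type: Type of the algorithm
 * @mask: Mask for type comparison
 *
 * Returns the algorithm with a reference held, or an error pointer.
 */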
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	/*
	 * If the internal flag is set for a cipher, require a caller to
	 * invoke the cipher with the internal flag to use that cipher.
	 * Also, if a caller wants to allocate a cipher that may or may
	 * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
	 * !(mask & CRYPTO_ALG_INTERNAL).
	 */
	if (!((type | mask) & CRYPTO_ALG_INTERNAL))
		mask |= CRYPTO_ALG_INTERNAL;

	larval = crypto_larval_lookup(name, type, mask);
	if (IS_ERR(larval) || !crypto_is_larval(larval))
		return larval;

	ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);

	if (ok == NOTIFY_STOP)
		alg = crypto_larval_wait(larval);
	else {
		crypto_mod_put(larval);
		alg = ERR_PTR(-ENOENT);
	}
	crypto_larval_kill(larval);
	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
	const struct crypto_type *type = tfm->__crt_alg->cra_type;

	if (type && tfm->exit)
		tfm->exit(tfm);
}

static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = alg->cra_type;
	unsigned int len;

	len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	if (type_obj)
		return len + type_obj->ctxsize(alg, type, mask);

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		BUG();

	case CRYPTO_ALG_TYPE_CIPHER:
		len += crypto_cipher_ctxsize(alg);
		break;

	case CRYPTO_ALG_TYPE_COMPRESS:
		len += crypto_compress_ctxsize(alg);
		break;
	}

	return len;
}

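/*
 * crypto_shoot_alg - Mark an algorithm as dying
 * @alg: The algorithm to shoot
 *
 * Called when cra_init fails with -EAGAIN. A dying algorithm is
 * treated as moribund and skipped by subsequent lookups.
 */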
void crypto_shoot_alg(struct crypto_alg *alg)
{
	down_write(&crypto_alg_sem);
	alg->cra_flags |= CRYPTO_ALG_DYING;
	up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);

struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
					 u32 mask, gfp_t gfp)
{
	struct crypto_tfm *tfm;
	unsigned int tfm_size;
	int err = -ENOMEM;

	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
	tfm = kzalloc(tfm_size, gfp);
	if (tfm == NULL)
		goto out_err;

	tfm->__crt_alg = alg;
	refcount_set(&tfm->refcnt, 1);

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(tfm);
out_err:
	tfm = ERR_PTR(err);
out:
	return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
				      u32 mask)
{
	return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/**
 * crypto_alloc_base - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 *
 * This function should not be used by new algorithm types.
 * Please use crypto_alloc_tfm instead.
 *
 * crypto_alloc_base() will first attempt to locate an already loaded
 * algorithm. If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias. If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly. A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type. Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_skcipher().
 *
 * In case of error the return value is an error pointer.
 */
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_alg_mod_lookup(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
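
/*
 * Example usage (a minimal sketch, not part of this file): transforms
 * from crypto_alloc_base() come from __crypto_alloc_tfm(), where the
 * allocation starts at the tfm itself, so the tfm doubles as the @mem
 * argument of crypto_destroy_tfm():
 *
 *	struct crypto_tfm *tfm = crypto_alloc_base("aes", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_destroy_tfm(tfm, tfm);
 */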

static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
				 const struct crypto_type *frontend, int node,
				 gfp_t gfp)
{
	struct crypto_tfm *tfm;
	unsigned int tfmsize;
	unsigned int total;
	char *mem;

	tfmsize = frontend->tfmsize;
	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

	mem = kzalloc_node(total, gfp, node);
	if (mem == NULL)
		return ERR_PTR(-ENOMEM);

	tfm = (struct crypto_tfm *)(mem + tfmsize);
	tfm->__crt_alg = alg;
	tfm->node = node;
	refcount_set(&tfm->refcnt, 1);

	return mem;
}

void *crypto_create_tfm_node(struct crypto_alg *alg,
			     const struct crypto_type *frontend,
			     int node)
{
	struct crypto_tfm *tfm;
	char *mem;
	int err;

	mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
	if (IS_ERR(mem))
		goto out;

	tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);

	err = frontend->init_tfm(tfm);
	if (err)
		goto out_free_tfm;

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(mem);
	mem = ERR_PTR(err);
out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);

void *crypto_clone_tfm(const struct crypto_type *frontend,
		       struct crypto_tfm *otfm)
{
	struct crypto_alg *alg = otfm->__crt_alg;
	struct crypto_tfm *tfm;
	char *mem;

	mem = ERR_PTR(-ESTALE);
	if (unlikely(!crypto_mod_get(alg)))
		goto out;

	mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
	if (IS_ERR(mem)) {
		crypto_mod_put(alg);
		goto out;
	}

	tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
	tfm->crt_flags = otfm->crt_flags;
	tfm->exit = otfm->exit;

out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_clone_tfm);

struct crypto_alg *crypto_find_alg(const char *alg_name,
				   const struct crypto_type *frontend,
				   u32 type, u32 mask)
{
	if (frontend) {
		type &= frontend->maskclear;
		mask &= frontend->maskclear;
		type |= frontend->type;
		mask |= frontend->maskset;
	}

	return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);

/**
 * crypto_alloc_tfm_node - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm
 * @frontend: Frontend algorithm type
 * @type: Type of algorithm
 * @mask: Mask for type comparison
 * @node: NUMA node on which users desire to put requests; if @node is
 *	  NUMA_NO_NODE, users have no special requirement.
 *
 * crypto_alloc_tfm() will first attempt to locate an already loaded
 * algorithm. If that fails and the kernel supports dynamically loadable
 * modules, it will then attempt to load a module of the same name or
 * alias. If that fails it will send a query to any loaded crypto manager
 * to construct an algorithm on the fly. A refcount is grabbed on the
 * algorithm which is then associated with the new transform.
 *
 * The returned transform is of a non-determinate type. Most people
 * should use one of the more specific allocation functions such as
 * crypto_alloc_skcipher().
 *
 * In case of error the return value is an error pointer.
 */
void *crypto_alloc_tfm_node(const char *alg_name,
			const struct crypto_type *frontend, u32 type, u32 mask,
			int node)
{
	void *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_find_alg(alg_name, frontend, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = crypto_create_tfm_node(alg, frontend, node);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);

/**
 * crypto_destroy_tfm - Free crypto transform
 * @mem: Start of tfm slab
 * @tfm: Transform to free
 *
 * This function frees up the transform and any associated resources,
 * then drops the refcount on the associated algorithm.
 */
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;

	if (IS_ERR_OR_NULL(mem))
		return;

	if (!refcount_dec_and_test(&tfm->refcnt))
		return;
	alg = tfm->__crt_alg;

	if (!tfm->exit && alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
	kfree_sensitive(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);

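/*
 * crypto_has_alg - Test whether an algorithm is available
 * @name: Name of the algorithm
 * @type: Type of the algorithm
 * @mask: Mask for type comparison
 *
 * Performs a full lookup, including module loading, and drops the
 * reference again. Returns 1 if the algorithm exists, 0 otherwise.
 */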
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
	int ret = 0;
	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

	if (!IS_ERR(alg)) {
		crypto_mod_put(alg);
		ret = 1;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);

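/*
 * crypto_req_done - Completion callback for synchronously waited-on
 * asynchronous requests
 * @data: The struct crypto_wait being waited on
 * @err: Final status of the request
 *
 * -EINPROGRESS only reports that a backlogged request has moved into
 * processing and is ignored; any other status completes the wait.
 * Typical use, a sketch built on the helpers in <linux/crypto.h>:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	...
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */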
void crypto_req_done(void *data, int err)
{
	struct crypto_wait *wait = data;

	if (err == -EINPROGRESS)
		return;

	wait->err = err;
	complete(&wait->completion);
}
EXPORT_SYMBOL_GPL(crypto_req_done);

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");