1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2016-2017 HiSilicon Limited. */ |
3 | #include <linux/crypto.h> |
4 | #include <linux/dma-mapping.h> |
5 | #include <linux/dmapool.h> |
6 | #include <linux/module.h> |
7 | #include <linux/mutex.h> |
8 | #include <linux/slab.h> |
9 | |
10 | #include <crypto/aes.h> |
11 | #include <crypto/algapi.h> |
12 | #include <crypto/internal/des.h> |
13 | #include <crypto/skcipher.h> |
14 | #include <crypto/xts.h> |
15 | #include <crypto/internal/skcipher.h> |
16 | |
17 | #include "sec_drv.h" |
18 | |
19 | #define SEC_MAX_CIPHER_KEY 64 |
20 | #define SEC_REQ_LIMIT SZ_32M |
21 | |
22 | struct sec_c_alg_cfg { |
23 | unsigned c_alg : 3; |
24 | unsigned c_mode : 3; |
25 | unsigned key_len : 2; |
26 | unsigned c_width : 2; |
27 | }; |
28 | |
29 | static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = { |
30 | [SEC_C_DES_ECB_64] = { |
31 | .c_alg = SEC_C_ALG_DES, |
32 | .c_mode = SEC_C_MODE_ECB, |
33 | .key_len = SEC_KEY_LEN_DES, |
34 | }, |
35 | [SEC_C_DES_CBC_64] = { |
36 | .c_alg = SEC_C_ALG_DES, |
37 | .c_mode = SEC_C_MODE_CBC, |
38 | .key_len = SEC_KEY_LEN_DES, |
39 | }, |
40 | [SEC_C_3DES_ECB_192_3KEY] = { |
41 | .c_alg = SEC_C_ALG_3DES, |
42 | .c_mode = SEC_C_MODE_ECB, |
43 | .key_len = SEC_KEY_LEN_3DES_3_KEY, |
44 | }, |
45 | [SEC_C_3DES_ECB_192_2KEY] = { |
46 | .c_alg = SEC_C_ALG_3DES, |
47 | .c_mode = SEC_C_MODE_ECB, |
48 | .key_len = SEC_KEY_LEN_3DES_2_KEY, |
49 | }, |
50 | [SEC_C_3DES_CBC_192_3KEY] = { |
51 | .c_alg = SEC_C_ALG_3DES, |
52 | .c_mode = SEC_C_MODE_CBC, |
53 | .key_len = SEC_KEY_LEN_3DES_3_KEY, |
54 | }, |
55 | [SEC_C_3DES_CBC_192_2KEY] = { |
56 | .c_alg = SEC_C_ALG_3DES, |
57 | .c_mode = SEC_C_MODE_CBC, |
58 | .key_len = SEC_KEY_LEN_3DES_2_KEY, |
59 | }, |
60 | [SEC_C_AES_ECB_128] = { |
61 | .c_alg = SEC_C_ALG_AES, |
62 | .c_mode = SEC_C_MODE_ECB, |
63 | .key_len = SEC_KEY_LEN_AES_128, |
64 | }, |
65 | [SEC_C_AES_ECB_192] = { |
66 | .c_alg = SEC_C_ALG_AES, |
67 | .c_mode = SEC_C_MODE_ECB, |
68 | .key_len = SEC_KEY_LEN_AES_192, |
69 | }, |
70 | [SEC_C_AES_ECB_256] = { |
71 | .c_alg = SEC_C_ALG_AES, |
72 | .c_mode = SEC_C_MODE_ECB, |
73 | .key_len = SEC_KEY_LEN_AES_256, |
74 | }, |
75 | [SEC_C_AES_CBC_128] = { |
76 | .c_alg = SEC_C_ALG_AES, |
77 | .c_mode = SEC_C_MODE_CBC, |
78 | .key_len = SEC_KEY_LEN_AES_128, |
79 | }, |
80 | [SEC_C_AES_CBC_192] = { |
81 | .c_alg = SEC_C_ALG_AES, |
82 | .c_mode = SEC_C_MODE_CBC, |
83 | .key_len = SEC_KEY_LEN_AES_192, |
84 | }, |
85 | [SEC_C_AES_CBC_256] = { |
86 | .c_alg = SEC_C_ALG_AES, |
87 | .c_mode = SEC_C_MODE_CBC, |
88 | .key_len = SEC_KEY_LEN_AES_256, |
89 | }, |
90 | [SEC_C_AES_CTR_128] = { |
91 | .c_alg = SEC_C_ALG_AES, |
92 | .c_mode = SEC_C_MODE_CTR, |
93 | .key_len = SEC_KEY_LEN_AES_128, |
94 | }, |
95 | [SEC_C_AES_CTR_192] = { |
96 | .c_alg = SEC_C_ALG_AES, |
97 | .c_mode = SEC_C_MODE_CTR, |
98 | .key_len = SEC_KEY_LEN_AES_192, |
99 | }, |
100 | [SEC_C_AES_CTR_256] = { |
101 | .c_alg = SEC_C_ALG_AES, |
102 | .c_mode = SEC_C_MODE_CTR, |
103 | .key_len = SEC_KEY_LEN_AES_256, |
104 | }, |
105 | [SEC_C_AES_XTS_128] = { |
106 | .c_alg = SEC_C_ALG_AES, |
107 | .c_mode = SEC_C_MODE_XTS, |
108 | .key_len = SEC_KEY_LEN_AES_128, |
109 | }, |
110 | [SEC_C_AES_XTS_256] = { |
111 | .c_alg = SEC_C_ALG_AES, |
112 | .c_mode = SEC_C_MODE_XTS, |
113 | .key_len = SEC_KEY_LEN_AES_256, |
114 | }, |
115 | [SEC_C_NULL] = { |
116 | }, |
117 | }; |
118 | |
119 | /* |
120 | * Mutex used to ensure safe operation of reference count of |
121 | * alg providers |
122 | */ |
123 | static DEFINE_MUTEX(algs_lock); |
124 | static unsigned int active_devs; |
125 | |
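/*
 * Fill a buffer descriptor (BD) template from the static per-algorithm
 * configuration table and point it at the DMA address of the key held
 * in the tfm context. The template is later copied into each element's
 * BD by sec_alg_alloc_and_fill_el().
 */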
126 | static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx, |
127 | struct sec_bd_info *req, |
128 | enum sec_cipher_alg alg) |
129 | { |
130 | const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg]; |
131 | |
132 | memset(req, 0, sizeof(*req)); |
133 | req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S; |
134 | req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S; |
135 | req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S; |
136 | req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S; |
137 | |
138 | req->cipher_key_addr_lo = lower_32_bits(ctx->pkey); |
139 | req->cipher_key_addr_hi = upper_32_bits(ctx->pkey); |
140 | } |
141 | |
142 | static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm, |
143 | const u8 *key, |
144 | unsigned int keylen, |
145 | enum sec_cipher_alg alg) |
146 | { |
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
148 | struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
149 | |
150 | ctx->cipher_alg = alg; |
151 | memcpy(ctx->key, key, keylen); |
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
154 | } |
155 | |
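/*
 * Walk a chain of hardware scatter-gather tables, returning each table
 * to the DMA pool it was allocated from.
 */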
156 | static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl, |
157 | dma_addr_t psec_sgl, struct sec_dev_info *info) |
158 | { |
159 | struct sec_hw_sgl *sgl_current, *sgl_next; |
160 | dma_addr_t sgl_next_dma; |
161 | |
162 | sgl_current = hw_sgl; |
163 | while (sgl_current) { |
164 | sgl_next = sgl_current->next; |
165 | sgl_next_dma = sgl_current->next_sgl; |
166 | |
		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
168 | |
169 | sgl_current = sgl_next; |
170 | psec_sgl = sgl_next_dma; |
171 | } |
172 | } |
173 | |
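/*
 * Build a chain of hardware scatter-gather tables describing an already
 * DMA-mapped scatterlist. A fresh table is taken from the DMA pool for
 * every SEC_MAX_SGE_NUM entries and linked to the previous one. For
 * example, if SEC_MAX_SGE_NUM were 64, a 130-entry scatterlist would
 * need three chained tables holding 64, 64 and 2 entries.
 */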
174 | static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, |
175 | dma_addr_t *psec_sgl, |
176 | struct scatterlist *sgl, |
177 | int count, |
178 | struct sec_dev_info *info, |
179 | gfp_t gfp) |
180 | { |
181 | struct sec_hw_sgl *sgl_current = NULL; |
182 | struct sec_hw_sgl *sgl_next; |
183 | dma_addr_t sgl_next_dma; |
184 | struct scatterlist *sg; |
185 | int ret, sge_index, i; |
186 | |
187 | if (!count) |
188 | return -EINVAL; |
189 | |
190 | for_each_sg(sgl, sg, count, i) { |
191 | sge_index = i % SEC_MAX_SGE_NUM; |
192 | if (sge_index == 0) { |
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   gfp, &sgl_next_dma);
195 | if (!sgl_next) { |
196 | ret = -ENOMEM; |
197 | goto err_free_hw_sgls; |
198 | } |
199 | |
200 | if (!sgl_current) { /* First one */ |
201 | *psec_sgl = sgl_next_dma; |
202 | *sec_sgl = sgl_next; |
203 | } else { /* Chained */ |
204 | sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM; |
205 | sgl_current->next_sgl = sgl_next_dma; |
206 | sgl_current->next = sgl_next; |
207 | } |
208 | sgl_current = sgl_next; |
209 | } |
210 | sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg); |
211 | sgl_current->sge_entries[sge_index].len = sg_dma_len(sg); |
212 | sgl_current->data_bytes_in_sgl += sg_dma_len(sg); |
213 | } |
214 | sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM; |
215 | sgl_current->next_sgl = 0; |
216 | (*sec_sgl)->entry_sum_in_chain = count; |
217 | |
218 | return 0; |
219 | |
220 | err_free_hw_sgls: |
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
222 | *psec_sgl = 0; |
223 | |
224 | return ret; |
225 | } |
226 | |
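/*
 * The cipher key lives in a small DMA-coherent buffer so the hardware
 * can fetch it directly; the BD template holds its bus address. On
 * rekeying the existing buffer is simply zeroed and reused.
 */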
227 | static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, |
228 | const u8 *key, unsigned int keylen, |
229 | enum sec_cipher_alg alg) |
230 | { |
231 | struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
232 | struct device *dev = ctx->queue->dev_info->dev; |
233 | |
234 | mutex_lock(&ctx->lock); |
235 | if (ctx->key) { |
236 | /* rekeying */ |
237 | memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); |
238 | } else { |
239 | /* new key */ |
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);
249 | |
250 | return 0; |
251 | } |
252 | |
253 | static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm, |
254 | const u8 *key, unsigned int keylen) |
255 | { |
256 | enum sec_cipher_alg alg; |
257 | |
258 | switch (keylen) { |
259 | case AES_KEYSIZE_128: |
260 | alg = SEC_C_AES_ECB_128; |
261 | break; |
262 | case AES_KEYSIZE_192: |
263 | alg = SEC_C_AES_ECB_192; |
264 | break; |
265 | case AES_KEYSIZE_256: |
266 | alg = SEC_C_AES_ECB_256; |
267 | break; |
268 | default: |
269 | return -EINVAL; |
270 | } |
271 | |
272 | return sec_alg_skcipher_setkey(tfm, key, keylen, alg); |
273 | } |
274 | |
275 | static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm, |
276 | const u8 *key, unsigned int keylen) |
277 | { |
278 | enum sec_cipher_alg alg; |
279 | |
280 | switch (keylen) { |
281 | case AES_KEYSIZE_128: |
282 | alg = SEC_C_AES_CBC_128; |
283 | break; |
284 | case AES_KEYSIZE_192: |
285 | alg = SEC_C_AES_CBC_192; |
286 | break; |
287 | case AES_KEYSIZE_256: |
288 | alg = SEC_C_AES_CBC_256; |
289 | break; |
290 | default: |
291 | return -EINVAL; |
292 | } |
293 | |
294 | return sec_alg_skcipher_setkey(tfm, key, keylen, alg); |
295 | } |
296 | |
297 | static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm, |
298 | const u8 *key, unsigned int keylen) |
299 | { |
300 | enum sec_cipher_alg alg; |
301 | |
302 | switch (keylen) { |
303 | case AES_KEYSIZE_128: |
304 | alg = SEC_C_AES_CTR_128; |
305 | break; |
306 | case AES_KEYSIZE_192: |
307 | alg = SEC_C_AES_CTR_192; |
308 | break; |
309 | case AES_KEYSIZE_256: |
310 | alg = SEC_C_AES_CTR_256; |
311 | break; |
312 | default: |
313 | return -EINVAL; |
314 | } |
315 | |
316 | return sec_alg_skcipher_setkey(tfm, key, keylen, alg); |
317 | } |
318 | |
319 | static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm, |
320 | const u8 *key, unsigned int keylen) |
321 | { |
322 | enum sec_cipher_alg alg; |
323 | int ret; |
324 | |
325 | ret = xts_verify_key(tfm, key, keylen); |
326 | if (ret) |
327 | return ret; |
328 | |
329 | switch (keylen) { |
330 | case AES_KEYSIZE_128 * 2: |
331 | alg = SEC_C_AES_XTS_128; |
332 | break; |
333 | case AES_KEYSIZE_256 * 2: |
334 | alg = SEC_C_AES_XTS_256; |
335 | break; |
336 | default: |
337 | return -EINVAL; |
338 | } |
339 | |
340 | return sec_alg_skcipher_setkey(tfm, key, keylen, alg); |
341 | } |
342 | |
343 | static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm, |
344 | const u8 *key, unsigned int keylen) |
345 | { |
346 | return verify_skcipher_des_key(tfm, key) ?: |
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
348 | } |
349 | |
350 | static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm, |
351 | const u8 *key, unsigned int keylen) |
352 | { |
353 | return verify_skcipher_des_key(tfm, key) ?: |
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
355 | } |
356 | |
357 | static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, |
358 | const u8 *key, unsigned int keylen) |
359 | { |
360 | return verify_skcipher_des3_key(tfm, key) ?: |
361 | sec_alg_skcipher_setkey(tfm, key, keylen, |
				       SEC_C_3DES_ECB_192_3KEY);
363 | } |
364 | |
365 | static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm, |
366 | const u8 *key, unsigned int keylen) |
367 | { |
368 | return verify_skcipher_des3_key(tfm, key) ?: |
369 | sec_alg_skcipher_setkey(tfm, key, keylen, |
				       SEC_C_3DES_CBC_192_3KEY);
371 | } |
372 | |
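/*
 * Release one request element: both hardware SGL chains and the split
 * software scatterlists that described it.
 */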
373 | static void sec_alg_free_el(struct sec_request_el *el, |
374 | struct sec_dev_info *info) |
375 | { |
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
381 | } |
382 | |
383 | /* queuelock must be held */ |
384 | static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue) |
385 | { |
386 | struct sec_request_el *el, *temp; |
387 | int ret = 0; |
388 | |
389 | mutex_lock(&sec_req->lock); |
390 | list_for_each_entry_safe(el, temp, &sec_req->elements, head) { |
391 | /* |
392 | * Add to hardware queue only under following circumstances |
393 | * 1) Software and hardware queue empty so no chain dependencies |
394 | * 2) No dependencies as new IV - (check software queue empty |
395 | * to maintain order) |
396 | * 3) No dependencies because the mode does no chaining. |
397 | * |
398 | * In other cases first insert onto the software queue which |
399 | * is then emptied as requests complete |
400 | */ |
401 | if (!queue->havesoftqueue || |
402 | (kfifo_is_empty(&queue->softqueue) && |
403 | sec_queue_empty(queue))) { |
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send, then try again */
407 | /* DEAD if here - should not happen */ |
408 | ret = -EBUSY; |
409 | goto err_unlock; |
410 | } |
411 | } else { |
412 | kfifo_put(&queue->softqueue, el); |
413 | } |
414 | } |
415 | err_unlock: |
	mutex_unlock(&sec_req->lock);
417 | |
418 | return ret; |
419 | } |
420 | |
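/*
 * Completion callback, run once per element when its response BD comes
 * back. It saves the next IV for chained modes, pushes any waiting work
 * from the software queue or backlog into the hardware queue, frees the
 * element, and completes the skcipher request once no elements remain.
 */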
421 | static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, |
422 | struct crypto_async_request *req_base) |
423 | { |
424 | struct skcipher_request *skreq = container_of(req_base, |
425 | struct skcipher_request, |
426 | base); |
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
428 | struct sec_request *backlog_req; |
429 | struct sec_request_el *sec_req_el, *nextrequest; |
430 | struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx; |
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
432 | struct device *dev = ctx->queue->dev_info->dev; |
433 | int icv_or_skey_en, ret; |
434 | bool done; |
435 | |
436 | sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el, |
437 | head); |
438 | icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >> |
439 | SEC_BD_W0_ICV_OR_SKEY_EN_S; |
440 | if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) { |
441 | dev_err(dev, "Got an invalid answer %lu %d\n" , |
442 | sec_resp->w1 & SEC_BD_W1_BD_INVALID, |
443 | icv_or_skey_en); |
444 | sec_req->err = -EINVAL; |
445 | /* |
446 | * We need to muddle on to avoid getting stuck with elements |
447 | * on the queue. Error will be reported so requester so |
448 | * it should be able to handle appropriately. |
449 | */ |
450 | } |
451 | |
	spin_lock_bh(&ctx->queue->queuelock);
453 | /* Put the IV in place for chained cases */ |
454 | switch (ctx->cipher_alg) { |
455 | case SEC_C_AES_CBC_128: |
456 | case SEC_C_AES_CBC_192: |
457 | case SEC_C_AES_CBC_256: |
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
472 | /* No need to sync to the device as coherent DMA */ |
473 | break; |
474 | case SEC_C_AES_CTR_128: |
475 | case SEC_C_AES_CTR_192: |
476 | case SEC_C_AES_CTR_256: |
		crypto_inc(skreq->iv, 16);
478 | break; |
479 | default: |
480 | /* Do not update */ |
481 | break; |
482 | } |
483 | |
484 | if (ctx->queue->havesoftqueue && |
485 | !kfifo_is_empty(&ctx->queue->softqueue) && |
	    sec_queue_empty(ctx->queue)) {
487 | ret = kfifo_get(&ctx->queue->softqueue, &nextrequest); |
488 | if (ret <= 0) |
489 | dev_err(dev, |
490 | "Error getting next element from kfifo %d\n" , |
491 | ret); |
492 | else |
493 | /* We know there is space so this cannot fail */ |
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
497 | /* Need to verify there is room first */ |
498 | backlog_req = list_first_entry(&ctx->backlog, |
499 | typeof(*backlog_req), |
500 | backlog_head); |
		if (sec_queue_can_enqueue(ctx->queue,
					  backlog_req->num_elements) ||
503 | (ctx->queue->havesoftqueue && |
504 | kfifo_avail(&ctx->queue->softqueue) > |
505 | backlog_req->num_elements)) { |
			sec_send_request(backlog_req, ctx->queue);
			crypto_request_complete(backlog_req->req_base,
						-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
510 | } |
511 | } |
	spin_unlock_bh(&ctx->queue->queuelock);
513 | |
514 | mutex_lock(&sec_req->lock); |
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
518 | |
519 | /* |
520 | * Request is done. |
521 | * The dance is needed as the lock is freed in the completion |
522 | */ |
523 | mutex_lock(&sec_req->lock); |
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
526 | if (done) { |
		if (crypto_skcipher_ivsize(atfm)) {
528 | dma_unmap_single(dev, sec_req->dma_iv, |
529 | crypto_skcipher_ivsize(atfm), |
530 | DMA_TO_DEVICE); |
531 | } |
532 | dma_unmap_sg(dev, skreq->src, sec_req->len_in, |
533 | DMA_BIDIRECTIONAL); |
534 | if (skreq->src != skreq->dst) |
535 | dma_unmap_sg(dev, skreq->dst, sec_req->len_out, |
536 | DMA_BIDIRECTIONAL); |
		skcipher_request_complete(skreq, sec_req->err);
538 | } |
539 | } |
540 | |
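/*
 * Entry point called by the queue layer when a response BD arrives;
 * dispatch to the callback stored in the shadowed request.
 */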
541 | void sec_alg_callback(struct sec_bd_info *resp, void *shadow) |
542 | { |
543 | struct sec_request *sec_req = shadow; |
544 | |
545 | sec_req->cb(resp, sec_req->req_base); |
546 | } |
547 | |
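/*
 * Work out how to split a request of @length bytes into chunks of at
 * most SEC_REQ_LIMIT (32 MiB). For example, a 70 MiB request yields
 * *steps = 3 with sizes { 32 MiB, 32 MiB, 6 MiB }.
 */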
548 | static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, |
549 | int *steps, gfp_t gfp) |
550 | { |
551 | size_t *sizes; |
552 | int i; |
553 | |
	/* Split into suitably sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
557 | if (!sizes) |
558 | return -ENOMEM; |
559 | |
560 | for (i = 0; i < *steps - 1; i++) |
561 | sizes[i] = SEC_REQ_LIMIT; |
562 | sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1); |
563 | *split_sizes = sizes; |
564 | |
565 | return 0; |
566 | } |
567 | |
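/*
 * DMA-map a scatterlist and split the mapped list into one scatterlist
 * per step using sg_split(). On success the caller owns the arrays in
 * *splits and *splits_nents and must undo the mapping, either via
 * sec_unmap_sg_on_err() or in the normal completion path.
 */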
568 | static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, |
569 | int steps, struct scatterlist ***splits, |
570 | int **splits_nents, |
571 | int sgl_len_in, |
572 | struct device *dev, gfp_t gfp) |
573 | { |
574 | int ret, count; |
575 | |
576 | count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL); |
577 | if (!count) |
578 | return -EINVAL; |
579 | |
	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
581 | if (!*splits) { |
582 | ret = -ENOMEM; |
583 | goto err_unmap_sg; |
584 | } |
	*splits_nents = kcalloc(steps, sizeof(int), gfp);
586 | if (!*splits_nents) { |
587 | ret = -ENOMEM; |
588 | goto err_free_splits; |
589 | } |
590 | |
	/* Split the mapped scatterlist into steps sub-lists of split_sizes */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, gfp);
594 | if (ret) { |
595 | ret = -ENOMEM; |
596 | goto err_free_splits_nents; |
597 | } |
598 | |
599 | return 0; |
600 | |
601 | err_free_splits_nents: |
	kfree(*splits_nents);
603 | err_free_splits: |
	kfree(*splits);
605 | err_unmap_sg: |
606 | dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL); |
607 | |
608 | return ret; |
609 | } |
610 | |
611 | /* |
612 | * Reverses the sec_map_and_split_sg call for messages not yet added to |
613 | * the queues. |
614 | */ |
615 | static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps, |
616 | struct scatterlist **splits, int *splits_nents, |
617 | int sgl_len_in, struct device *dev) |
618 | { |
619 | int i; |
620 | |
621 | for (i = 0; i < steps; i++) |
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);
625 | |
626 | dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL); |
627 | } |
628 | |
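/*
 * Allocate one request element and fill in its BD: copy the template,
 * set the cipher direction and granule-size fields, build the hardware
 * SGLs for the input (and for the output when src != dst) and wire up
 * their bus addresses.
 */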
629 | static struct sec_request_el |
630 | *sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt, |
631 | int el_size, bool different_dest, |
632 | struct scatterlist *sgl_in, int n_ents_in, |
633 | struct scatterlist *sgl_out, int n_ents_out, |
634 | struct sec_dev_info *info, gfp_t gfp) |
635 | { |
636 | struct sec_request_el *el; |
637 | struct sec_bd_info *req; |
638 | int ret; |
639 | |
	el = kzalloc(sizeof(*el), gfp);
	if (!el)
		return ERR_PTR(-ENOMEM);
643 | el->el_length = el_size; |
644 | req = &el->req; |
645 | memcpy(req, template, sizeof(*req)); |
646 | |
647 | req->w0 &= ~SEC_BD_W0_CIPHER_M; |
648 | if (encrypt) |
649 | req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S; |
650 | else |
651 | req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S; |
652 | |
653 | req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M; |
654 | req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) & |
655 | SEC_BD_W0_C_GRAN_SIZE_19_16_M; |
656 | |
657 | req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M; |
658 | req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) & |
659 | SEC_BD_W0_C_GRAN_SIZE_21_20_M; |
660 | |
661 | /* Writing whole u32 so no need to take care of masking */ |
662 | req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) | |
663 | ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) & |
664 | SEC_BD_W2_C_GRAN_SIZE_15_0_M); |
665 | |
666 | req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M; |
667 | req->w1 |= SEC_BD_W1_ADDR_TYPE; |
668 | |
669 | el->sgl_in = sgl_in; |
670 | |
	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info, gfp);
673 | if (ret) |
674 | goto err_free_el; |
675 | |
676 | req->data_addr_lo = lower_32_bits(el->dma_in); |
677 | req->data_addr_hi = upper_32_bits(el->dma_in); |
678 | |
679 | if (different_dest) { |
680 | el->sgl_out = sgl_out; |
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info, gfp);
684 | if (ret) |
685 | goto err_free_hw_sgl_in; |
686 | |
687 | req->w0 |= SEC_BD_W0_DE; |
688 | req->cipher_destin_addr_lo = lower_32_bits(el->dma_out); |
689 | req->cipher_destin_addr_hi = upper_32_bits(el->dma_out); |
690 | |
691 | } else { |
692 | req->w0 &= ~SEC_BD_W0_DE; |
693 | req->cipher_destin_addr_lo = lower_32_bits(el->dma_in); |
694 | req->cipher_destin_addr_hi = upper_32_bits(el->dma_in); |
695 | } |
696 | |
697 | return el; |
698 | |
699 | err_free_hw_sgl_in: |
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
705 | } |
706 | |
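/*
 * Main crypto path: map and split the source (and destination) lists,
 * build one element per SEC_REQ_LIMIT-sized chunk, then queue the whole
 * set atomically so a partial submission never has to be unwound.
 */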
707 | static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, |
708 | bool encrypt) |
709 | { |
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
715 | struct sec_dev_info *info = queue->dev_info; |
716 | int i, ret, steps; |
717 | size_t *split_sizes; |
718 | struct scatterlist **splits_in; |
719 | struct scatterlist **splits_out = NULL; |
720 | int *splits_in_nents; |
721 | int *splits_out_nents = NULL; |
722 | struct sec_request_el *el, *temp; |
723 | bool split = skreq->src != skreq->dst; |
724 | gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; |
725 | |
726 | mutex_init(&sec_req->lock); |
727 | sec_req->req_base = &skreq->base; |
728 | sec_req->err = 0; |
729 | /* SGL mapping out here to allow us to break it up as necessary */ |
	sec_req->len_in = sg_nents(skreq->src);
731 | |
	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
734 | if (ret) |
735 | return ret; |
736 | sec_req->num_elements = steps; |
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
740 | if (ret) |
741 | goto err_free_split_sizes; |
742 | |
743 | if (split) { |
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
748 | if (ret) |
749 | goto err_unmap_in_sg; |
750 | } |
	/* Shared info stored in sec_req - applies to all BDs */
752 | sec_req->tfm_ctx = ctx; |
753 | sec_req->cb = sec_skcipher_alg_callback; |
	INIT_LIST_HEAD(&sec_req->elements);
755 | |
756 | /* |
757 | * Future optimization. |
758 | * In the chaining case we can't use a dma pool bounce buffer |
759 | * but in the case where we know there is no chaining we can |
760 | */ |
761 | if (crypto_skcipher_ivsize(tfm: atfm)) { |
762 | sec_req->dma_iv = dma_map_single(info->dev, skreq->iv, |
763 | crypto_skcipher_ivsize(atfm), |
764 | DMA_TO_DEVICE); |
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
766 | ret = -ENOMEM; |
767 | goto err_unmap_out_sg; |
768 | } |
769 | } |
770 | |
771 | /* Set them all up then queue - cleaner error handling. */ |
772 | for (i = 0; i < steps; i++) { |
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
783 | goto err_free_elements; |
784 | } |
785 | el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv); |
786 | el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv); |
787 | el->sec_req = sec_req; |
		list_add_tail(&el->head, &sec_req->elements);
789 | } |
790 | |
791 | /* |
792 | * Only attempt to queue if the whole lot can fit in the queue - |
	 * we can't successfully clean up after a partial queuing so this
794 | * must succeed or fail atomically. |
795 | * |
796 | * Big hammer test of both software and hardware queues - could be |
797 | * more refined but this is unlikely to happen so no need. |
798 | */ |
799 | |
800 | /* Grab a big lock for a long time to avoid concurrency issues */ |
	spin_lock_bh(&queue->queuelock);
802 | |
803 | /* |
804 | * Can go on to queue if we have space in either: |
805 | * 1) The hardware queue and no software queue |
806 | * 2) The software queue |
807 | * AND there is nothing in the backlog. If there is backlog we |
808 | * have to only queue to the backlog queue and return busy. |
809 | */ |
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			spin_unlock_bh(&queue->queuelock);
818 | goto out; |
819 | } |
820 | |
		spin_unlock_bh(&queue->queuelock);
822 | goto err_free_elements; |
823 | } |
824 | ret = sec_send_request(sec_req, queue); |
	spin_unlock_bh(&queue->queuelock);
826 | if (ret) |
827 | goto err_free_elements; |
828 | |
829 | ret = -EINPROGRESS; |
830 | out: |
831 | /* Cleanup - all elements in pointer arrays have been copied */ |
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);
837 | return ret; |
838 | |
839 | err_free_elements: |
840 | list_for_each_entry_safe(el, temp, &sec_req->elements, head) { |
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
845 | dma_unmap_single(info->dev, sec_req->dma_iv, |
846 | crypto_skcipher_ivsize(atfm), |
847 | DMA_BIDIRECTIONAL); |
848 | err_unmap_out_sg: |
849 | if (split) |
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
856 | err_free_split_sizes: |
	kfree(split_sizes);
858 | |
859 | return ret; |
860 | } |
861 | |
862 | static int sec_alg_skcipher_encrypt(struct skcipher_request *req) |
863 | { |
	return sec_alg_skcipher_crypto(req, true);
865 | } |
866 | |
867 | static int sec_alg_skcipher_decrypt(struct skcipher_request *req) |
868 | { |
	return sec_alg_skcipher_crypto(req, false);
870 | } |
871 | |
872 | static int sec_alg_skcipher_init(struct crypto_skcipher *tfm) |
873 | { |
874 | struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
875 | |
876 | mutex_init(&ctx->lock); |
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));
879 | |
880 | ctx->queue = sec_queue_alloc_start_safe(); |
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);
883 | |
884 | spin_lock_init(&ctx->queue->queuelock); |
885 | ctx->queue->havesoftqueue = false; |
886 | |
887 | return 0; |
888 | } |
889 | |
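/* Zero and free the DMA-coherent key buffer, then release the queue. */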
890 | static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm) |
891 | { |
892 | struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
893 | struct device *dev = ctx->queue->dev_info->dev; |
894 | |
895 | if (ctx->key) { |
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
899 | } |
	sec_queue_stop_release(ctx->queue);
901 | } |
902 | |
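/*
 * Variant used by the IV-chained modes (the CBC and CTR algorithms
 * below): in addition to the basic init, allocate a 512-entry software
 * queue so dependent elements can be held back to preserve ordering.
 */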
903 | static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm) |
904 | { |
905 | struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
906 | int ret; |
907 | |
908 | ret = sec_alg_skcipher_init(tfm); |
909 | if (ret) |
910 | return ret; |
911 | |
912 | INIT_KFIFO(ctx->queue->softqueue); |
913 | ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL); |
914 | if (ret) { |
915 | sec_alg_skcipher_exit(tfm); |
916 | return ret; |
917 | } |
918 | ctx->queue->havesoftqueue = true; |
919 | |
920 | return 0; |
921 | } |
922 | |
923 | static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm) |
924 | { |
925 | struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); |
926 | |
927 | kfifo_free(&ctx->queue->softqueue); |
928 | sec_alg_skcipher_exit(tfm); |
929 | } |
930 | |
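/*
 * Algorithms exposed to the crypto API. Modes with IV chaining use the
 * *_with_queue init/exit pair above; ECB and XTS use the plain ones.
 */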
931 | static struct skcipher_alg sec_algs[] = { |
932 | { |
933 | .base = { |
934 | .cra_name = "ecb(aes)" , |
935 | .cra_driver_name = "hisi_sec_aes_ecb" , |
936 | .cra_priority = 4001, |
937 | .cra_flags = CRYPTO_ALG_ASYNC | |
938 | CRYPTO_ALG_ALLOCATES_MEMORY, |
939 | .cra_blocksize = AES_BLOCK_SIZE, |
940 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
941 | .cra_alignmask = 0, |
942 | .cra_module = THIS_MODULE, |
943 | }, |
944 | .init = sec_alg_skcipher_init, |
945 | .exit = sec_alg_skcipher_exit, |
946 | .setkey = sec_alg_skcipher_setkey_aes_ecb, |
947 | .decrypt = sec_alg_skcipher_decrypt, |
948 | .encrypt = sec_alg_skcipher_encrypt, |
949 | .min_keysize = AES_MIN_KEY_SIZE, |
950 | .max_keysize = AES_MAX_KEY_SIZE, |
951 | .ivsize = 0, |
952 | }, { |
953 | .base = { |
954 | .cra_name = "cbc(aes)" , |
955 | .cra_driver_name = "hisi_sec_aes_cbc" , |
956 | .cra_priority = 4001, |
957 | .cra_flags = CRYPTO_ALG_ASYNC | |
958 | CRYPTO_ALG_ALLOCATES_MEMORY, |
959 | .cra_blocksize = AES_BLOCK_SIZE, |
960 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
961 | .cra_alignmask = 0, |
962 | .cra_module = THIS_MODULE, |
963 | }, |
964 | .init = sec_alg_skcipher_init_with_queue, |
965 | .exit = sec_alg_skcipher_exit_with_queue, |
966 | .setkey = sec_alg_skcipher_setkey_aes_cbc, |
967 | .decrypt = sec_alg_skcipher_decrypt, |
968 | .encrypt = sec_alg_skcipher_encrypt, |
969 | .min_keysize = AES_MIN_KEY_SIZE, |
970 | .max_keysize = AES_MAX_KEY_SIZE, |
971 | .ivsize = AES_BLOCK_SIZE, |
972 | }, { |
973 | .base = { |
974 | .cra_name = "ctr(aes)" , |
975 | .cra_driver_name = "hisi_sec_aes_ctr" , |
976 | .cra_priority = 4001, |
977 | .cra_flags = CRYPTO_ALG_ASYNC | |
978 | CRYPTO_ALG_ALLOCATES_MEMORY, |
979 | .cra_blocksize = AES_BLOCK_SIZE, |
980 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
981 | .cra_alignmask = 0, |
982 | .cra_module = THIS_MODULE, |
983 | }, |
984 | .init = sec_alg_skcipher_init_with_queue, |
985 | .exit = sec_alg_skcipher_exit_with_queue, |
986 | .setkey = sec_alg_skcipher_setkey_aes_ctr, |
987 | .decrypt = sec_alg_skcipher_decrypt, |
988 | .encrypt = sec_alg_skcipher_encrypt, |
989 | .min_keysize = AES_MIN_KEY_SIZE, |
990 | .max_keysize = AES_MAX_KEY_SIZE, |
991 | .ivsize = AES_BLOCK_SIZE, |
992 | }, { |
993 | .base = { |
994 | .cra_name = "xts(aes)" , |
995 | .cra_driver_name = "hisi_sec_aes_xts" , |
996 | .cra_priority = 4001, |
997 | .cra_flags = CRYPTO_ALG_ASYNC | |
998 | CRYPTO_ALG_ALLOCATES_MEMORY, |
999 | .cra_blocksize = AES_BLOCK_SIZE, |
1000 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
1001 | .cra_alignmask = 0, |
1002 | .cra_module = THIS_MODULE, |
1003 | }, |
1004 | .init = sec_alg_skcipher_init, |
1005 | .exit = sec_alg_skcipher_exit, |
1006 | .setkey = sec_alg_skcipher_setkey_aes_xts, |
1007 | .decrypt = sec_alg_skcipher_decrypt, |
1008 | .encrypt = sec_alg_skcipher_encrypt, |
1009 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
1010 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
1011 | .ivsize = AES_BLOCK_SIZE, |
1012 | }, { |
1013 | /* Unable to find any test vectors so untested */ |
1014 | .base = { |
1015 | .cra_name = "ecb(des)" , |
1016 | .cra_driver_name = "hisi_sec_des_ecb" , |
1017 | .cra_priority = 4001, |
1018 | .cra_flags = CRYPTO_ALG_ASYNC | |
1019 | CRYPTO_ALG_ALLOCATES_MEMORY, |
1020 | .cra_blocksize = DES_BLOCK_SIZE, |
1021 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
1022 | .cra_alignmask = 0, |
1023 | .cra_module = THIS_MODULE, |
1024 | }, |
1025 | .init = sec_alg_skcipher_init, |
1026 | .exit = sec_alg_skcipher_exit, |
1027 | .setkey = sec_alg_skcipher_setkey_des_ecb, |
1028 | .decrypt = sec_alg_skcipher_decrypt, |
1029 | .encrypt = sec_alg_skcipher_encrypt, |
1030 | .min_keysize = DES_KEY_SIZE, |
1031 | .max_keysize = DES_KEY_SIZE, |
1032 | .ivsize = 0, |
1033 | }, { |
1034 | .base = { |
1035 | .cra_name = "cbc(des)" , |
1036 | .cra_driver_name = "hisi_sec_des_cbc" , |
1037 | .cra_priority = 4001, |
1038 | .cra_flags = CRYPTO_ALG_ASYNC | |
1039 | CRYPTO_ALG_ALLOCATES_MEMORY, |
1040 | .cra_blocksize = DES_BLOCK_SIZE, |
1041 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
1042 | .cra_alignmask = 0, |
1043 | .cra_module = THIS_MODULE, |
1044 | }, |
1045 | .init = sec_alg_skcipher_init_with_queue, |
1046 | .exit = sec_alg_skcipher_exit_with_queue, |
1047 | .setkey = sec_alg_skcipher_setkey_des_cbc, |
1048 | .decrypt = sec_alg_skcipher_decrypt, |
1049 | .encrypt = sec_alg_skcipher_encrypt, |
1050 | .min_keysize = DES_KEY_SIZE, |
1051 | .max_keysize = DES_KEY_SIZE, |
1052 | .ivsize = DES_BLOCK_SIZE, |
1053 | }, { |
1054 | .base = { |
1055 | .cra_name = "cbc(des3_ede)" , |
1056 | .cra_driver_name = "hisi_sec_3des_cbc" , |
1057 | .cra_priority = 4001, |
1058 | .cra_flags = CRYPTO_ALG_ASYNC | |
1059 | CRYPTO_ALG_ALLOCATES_MEMORY, |
1060 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1061 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
1062 | .cra_alignmask = 0, |
1063 | .cra_module = THIS_MODULE, |
1064 | }, |
1065 | .init = sec_alg_skcipher_init_with_queue, |
1066 | .exit = sec_alg_skcipher_exit_with_queue, |
1067 | .setkey = sec_alg_skcipher_setkey_3des_cbc, |
1068 | .decrypt = sec_alg_skcipher_decrypt, |
1069 | .encrypt = sec_alg_skcipher_encrypt, |
1070 | .min_keysize = DES3_EDE_KEY_SIZE, |
1071 | .max_keysize = DES3_EDE_KEY_SIZE, |
1072 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1073 | }, { |
1074 | .base = { |
1075 | .cra_name = "ecb(des3_ede)" , |
1076 | .cra_driver_name = "hisi_sec_3des_ecb" , |
1077 | .cra_priority = 4001, |
1078 | .cra_flags = CRYPTO_ALG_ASYNC | |
1079 | CRYPTO_ALG_ALLOCATES_MEMORY, |
1080 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
1081 | .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), |
1082 | .cra_alignmask = 0, |
1083 | .cra_module = THIS_MODULE, |
1084 | }, |
1085 | .init = sec_alg_skcipher_init, |
1086 | .exit = sec_alg_skcipher_exit, |
1087 | .setkey = sec_alg_skcipher_setkey_3des_ecb, |
1088 | .decrypt = sec_alg_skcipher_decrypt, |
1089 | .encrypt = sec_alg_skcipher_encrypt, |
1090 | .min_keysize = DES3_EDE_KEY_SIZE, |
1091 | .max_keysize = DES3_EDE_KEY_SIZE, |
1092 | .ivsize = 0, |
1093 | } |
1094 | }; |
1095 | |
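/*
 * Registration is shared across all SEC devices: only the transition
 * from zero to one active device registers the algorithms, and only the
 * last device going away unregisters them.
 */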
1096 | int sec_algs_register(void) |
1097 | { |
1098 | int ret = 0; |
1099 | |
1100 | mutex_lock(&algs_lock); |
1101 | if (++active_devs != 1) |
1102 | goto unlock; |
1103 | |
	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
1105 | if (ret) |
1106 | --active_devs; |
1107 | unlock: |
	mutex_unlock(&algs_lock);
1109 | |
1110 | return ret; |
1111 | } |
1112 | |
1113 | void sec_algs_unregister(void) |
1114 | { |
1115 | mutex_lock(&algs_lock); |
1116 | if (--active_devs != 0) |
1117 | goto unlock; |
	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
1119 | |
1120 | unlock: |
	mutex_unlock(&algs_lock);
1122 | } |
1123 | |