1 | /* |
2 | * This file is part of the Chelsio T6 Crypto driver for Linux. |
3 | * |
4 | * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. |
5 | * |
6 | * This software is available to you under a choice of one of two |
7 | * licenses. You may choose to be licensed under the terms of the GNU |
8 | * General Public License (GPL) Version 2, available from the file |
9 | * COPYING in the main directory of this source tree, or the |
10 | * OpenIB.org BSD license below: |
11 | * |
12 | * Redistribution and use in source and binary forms, with or |
13 | * without modification, are permitted provided that the following |
14 | * conditions are met: |
15 | * |
16 | * - Redistributions of source code must retain the above |
17 | * copyright notice, this list of conditions and the following |
18 | * disclaimer. |
19 | * |
20 | * - Redistributions in binary form must reproduce the above |
21 | * copyright notice, this list of conditions and the following |
22 | * disclaimer in the documentation and/or other materials |
23 | * provided with the distribution. |
24 | * |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
32 | * SOFTWARE. |
33 | * |
34 | * Written and Maintained by: |
35 | * Manoj Malviya (manojmalviya@chelsio.com) |
36 | * Atul Gupta (atul.gupta@chelsio.com) |
37 | * Jitendra Lulla (jlulla@chelsio.com) |
38 | * Yeshaswi M R Gowda (yeshaswi@chelsio.com) |
39 | * Harsh Jain (harsh@chelsio.com) |
40 | */ |
41 | |
42 | #define pr_fmt(fmt) "chcr:" fmt |
43 | |
44 | #include <linux/kernel.h> |
45 | #include <linux/module.h> |
46 | #include <linux/crypto.h> |
47 | #include <linux/skbuff.h> |
48 | #include <linux/rtnetlink.h> |
49 | #include <linux/highmem.h> |
50 | #include <linux/scatterlist.h> |
51 | |
52 | #include <crypto/aes.h> |
53 | #include <crypto/algapi.h> |
54 | #include <crypto/hash.h> |
55 | #include <crypto/gcm.h> |
56 | #include <crypto/sha1.h> |
57 | #include <crypto/sha2.h> |
58 | #include <crypto/authenc.h> |
59 | #include <crypto/ctr.h> |
60 | #include <crypto/gf128mul.h> |
61 | #include <crypto/internal/aead.h> |
62 | #include <crypto/null.h> |
63 | #include <crypto/internal/skcipher.h> |
64 | #include <crypto/aead.h> |
65 | #include <crypto/scatterwalk.h> |
66 | #include <crypto/internal/hash.h> |
67 | |
68 | #include "t4fw_api.h" |
69 | #include "t4_msg.h" |
70 | #include "chcr_core.h" |
71 | #include "chcr_algo.h" |
72 | #include "chcr_crypto.h" |
73 | |
74 | #define IV AES_BLOCK_SIZE |
75 | |
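/*
 * Precomputed WR space cost, in bytes, of a ULPTX source SGL
 * (sgl_ent_len[]) and a PHYS_DSGL destination list (dsgl_ent_len[])
 * holding N entries. chcr_hash_ent_in_wr() and chcr_sg_ent_in_wr()
 * index these tables to decide how much payload still fits in one
 * work request.
 */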
76 | static unsigned int sgl_ent_len[] = { |
77 | 0, 0, 16, 24, 40, 48, 64, 72, 88, |
78 | 96, 112, 120, 136, 144, 160, 168, 184, |
79 | 192, 208, 216, 232, 240, 256, 264, 280, |
80 | 288, 304, 312, 328, 336, 352, 360, 376 |
81 | }; |
82 | |
83 | static unsigned int dsgl_ent_len[] = { |
84 | 0, 32, 32, 48, 48, 64, 64, 80, 80, |
85 | 112, 112, 128, 128, 144, 144, 160, 160, |
86 | 192, 192, 208, 208, 224, 224, 240, 240, |
87 | 272, 272, 288, 288, 304, 304, 320, 320 |
88 | }; |
89 | |
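/*
 * AES key-schedule round constants (Rcon), kept in the most-significant
 * byte to match the big-endian word handling in get_aes_decrypt_key().
 */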
90 | static u32 round_constant[11] = { |
91 | 0x01000000, 0x02000000, 0x04000000, 0x08000000, |
92 | 0x10000000, 0x20000000, 0x40000000, 0x80000000, |
93 | 0x1B000000, 0x36000000, 0x6C000000 |
94 | }; |
95 | |
96 | static int chcr_handle_cipher_resp(struct skcipher_request *req, |
97 | unsigned char *input, int err); |
98 | |
99 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) |
100 | { |
101 | return &ctx->crypto_ctx->aeadctx; |
102 | } |
103 | |
104 | static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) |
105 | { |
106 | return &ctx->crypto_ctx->ablkctx; |
107 | } |
108 | |
109 | static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) |
110 | { |
111 | return &ctx->crypto_ctx->hmacctx; |
112 | } |
113 | |
114 | static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) |
115 | { |
116 | return gctx->ctx->gcm; |
117 | } |
118 | |
119 | static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx) |
120 | { |
121 | return gctx->ctx->authenc; |
122 | } |
123 | |
124 | static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) |
125 | { |
126 | return container_of(ctx->dev, struct uld_ctx, dev); |
127 | } |
128 | |
129 | static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx) |
130 | { |
131 | memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr)); |
132 | } |
133 | |
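/*
 * Count the hardware SG entries needed to cover @reqlen bytes of @sg,
 * skipping the first @skip bytes, with at most @entlen bytes per entry.
 * For instance, a single 5000-byte DMA segment with entlen == 2048
 * contributes DIV_ROUND_UP(5000, 2048) = 3 entries.
 */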
134 | static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, |
135 | unsigned int entlen, |
136 | unsigned int skip) |
137 | { |
138 | int nents = 0; |
139 | unsigned int less; |
140 | unsigned int skip_len = 0; |
141 | |
142 | while (sg && skip) { |
143 | if (sg_dma_len(sg) <= skip) { |
144 | skip -= sg_dma_len(sg); |
145 | skip_len = 0; |
146 | sg = sg_next(sg); |
147 | } else { |
148 | skip_len = skip; |
149 | skip = 0; |
150 | } |
151 | } |
152 | |
153 | while (sg && reqlen) { |
154 | less = min(reqlen, sg_dma_len(sg) - skip_len); |
155 | nents += DIV_ROUND_UP(less, entlen); |
156 | reqlen -= less; |
157 | skip_len = 0; |
158 | sg = sg_next(sg); |
159 | } |
160 | return nents; |
161 | } |
162 | |
163 | static inline int get_aead_subtype(struct crypto_aead *aead) |
164 | { |
	struct aead_alg *alg = crypto_aead_alg(aead);
166 | struct chcr_alg_template *chcr_crypto_alg = |
167 | container_of(alg, struct chcr_alg_template, alg.aead); |
168 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; |
169 | } |
170 | |
171 | void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) |
172 | { |
173 | u8 temp[SHA512_DIGEST_SIZE]; |
174 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
175 | int authsize = crypto_aead_authsize(tfm); |
176 | struct cpl_fw6_pld *fw6_pld; |
177 | int cmp = 0; |
178 | |
179 | fw6_pld = (struct cpl_fw6_pld *)input; |
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
183 | } else { |
184 | |
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
189 | } |
190 | if (cmp) |
191 | *err = -EBADMSG; |
192 | else |
193 | *err = 0; |
194 | } |
195 | |
196 | static int chcr_inc_wrcount(struct chcr_dev *dev) |
197 | { |
198 | if (dev->state == CHCR_DETACH) |
199 | return 1; |
	atomic_inc(&dev->inflight);
201 | return 0; |
202 | } |
203 | |
204 | static inline void chcr_dec_wrcount(struct chcr_dev *dev) |
205 | { |
	atomic_dec(&dev->inflight);
207 | } |
208 | |
209 | static inline int chcr_handle_aead_resp(struct aead_request *req, |
210 | unsigned char *input, |
211 | int err) |
212 | { |
213 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
214 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
215 | struct chcr_dev *dev = a_ctx(tfm)->dev; |
216 | |
217 | chcr_aead_common_exit(req); |
218 | if (reqctx->verify == VERIFY_SW) { |
		chcr_verify_tag(req, input, &err);
220 | reqctx->verify = VERIFY_HW; |
221 | } |
222 | chcr_dec_wrcount(dev); |
223 | aead_request_complete(req, err); |
224 | |
225 | return err; |
226 | } |
227 | |
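/*
 * Derive the reverse-round AES decrypt key consumed by
 * generate_copy_rrkey(): run the standard key expansion, keeping only a
 * sliding window of Nk words in w_ring[], then emit the final Nk words
 * of the schedule in reverse order.
 */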
228 | static void get_aes_decrypt_key(unsigned char *dec_key, |
229 | const unsigned char *key, |
230 | unsigned int keylength) |
231 | { |
232 | u32 temp; |
233 | u32 w_ring[MAX_NK]; |
234 | int i, j, k; |
235 | u8 nr, nk; |
236 | |
237 | switch (keylength) { |
238 | case AES_KEYLENGTH_128BIT: |
239 | nk = KEYLENGTH_4BYTES; |
240 | nr = NUMBER_OF_ROUNDS_10; |
241 | break; |
242 | case AES_KEYLENGTH_192BIT: |
243 | nk = KEYLENGTH_6BYTES; |
244 | nr = NUMBER_OF_ROUNDS_12; |
245 | break; |
246 | case AES_KEYLENGTH_256BIT: |
247 | nk = KEYLENGTH_8BYTES; |
248 | nr = NUMBER_OF_ROUNDS_14; |
249 | break; |
250 | default: |
251 | return; |
252 | } |
253 | for (i = 0; i < nk; i++) |
		w_ring[i] = get_unaligned_be32(&key[i * 4]);
255 | |
256 | i = 0; |
257 | temp = w_ring[nk - 1]; |
258 | while (i + nk < (nr + 1) * 4) { |
259 | if (!(i % nk)) { |
260 | /* RotWord(temp) */ |
261 | temp = (temp << 8) | (temp >> 24); |
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
266 | } |
267 | w_ring[i % nk] ^= temp; |
268 | temp = w_ring[i % nk]; |
269 | i++; |
270 | } |
271 | i--; |
272 | for (k = 0, j = i % nk; k < nk; k++) { |
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
274 | j--; |
275 | if (j < 0) |
276 | j += nk; |
277 | } |
278 | } |
279 | |
280 | static struct crypto_shash *chcr_alloc_shash(unsigned int ds) |
281 | { |
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
283 | |
	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}
301 | |
302 | return base_hash; |
303 | } |
304 | |
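/*
 * Hash exactly one block (the HMAC ipad or opad) through the base shash
 * and export its raw internal state. The exported partial digest is
 * later loaded into the key context so the hardware can continue the
 * HMAC without re-hashing the pad for every request.
 */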
305 | static int chcr_compute_partial_hash(struct shash_desc *desc, |
306 | char *iopad, char *result_hash, |
307 | int digest_size) |
308 | { |
309 | struct sha1_state sha1_st; |
310 | struct sha256_state sha256_st; |
311 | struct sha512_state sha512_st; |
312 | int error; |
313 | |
	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
346 | return error; |
347 | } |
348 | |
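/*
 * The exported shash state is an array of native-endian words; convert
 * it in place to big-endian (64-bit words for the SHA-512 state size,
 * 32-bit words otherwise) before it is handed to the hardware.
 */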
349 | static void chcr_change_order(char *buf, int ds) |
350 | { |
351 | int i; |
352 | |
353 | if (ds == SHA512_DIGEST_SIZE) { |
354 | for (i = 0; i < (ds / sizeof(u64)); i++) |
355 | *((__be64 *)buf + i) = |
356 | cpu_to_be64(*((u64 *)buf + i)); |
357 | } else { |
358 | for (i = 0; i < (ds / sizeof(u32)); i++) |
359 | *((__be32 *)buf + i) = |
360 | cpu_to_be32(*((u32 *)buf + i)); |
361 | } |
362 | } |
363 | |
364 | static inline int is_hmac(struct crypto_tfm *tfm) |
365 | { |
366 | struct crypto_alg *alg = tfm->__crt_alg; |
367 | struct chcr_alg_template *chcr_crypto_alg = |
368 | container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, |
369 | alg.hash); |
370 | if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC) |
371 | return 1; |
372 | return 0; |
373 | } |
374 | |
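/*
 * The dsgl_walk_*() helpers incrementally build a CPL_RX_PHYS_DSGL
 * destination list (eight addr/len pairs per phys_sge_pairs block),
 * while the ulptx_walk_*() helpers build the ULPTX source SGL, whose
 * first entry lives in the header itself (len0/addr0). The sg variants
 * record last_sg/last_sg_len so a follow-on WR can resume
 * mid-scatterlist.
 */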
375 | static inline void dsgl_walk_init(struct dsgl_walk *walk, |
376 | struct cpl_rx_phys_dsgl *dsgl) |
377 | { |
378 | walk->dsgl = dsgl; |
379 | walk->nents = 0; |
380 | walk->to = (struct phys_sge_pairs *)(dsgl + 1); |
381 | } |
382 | |
383 | static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid, |
384 | int pci_chan_id) |
385 | { |
386 | struct cpl_rx_phys_dsgl *phys_cpl; |
387 | |
388 | phys_cpl = walk->dsgl; |
389 | |
390 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) |
391 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); |
392 | phys_cpl->pcirlxorder_to_noofsgentr = |
393 | htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | |
394 | CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | |
395 | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | |
396 | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | |
397 | CPL_RX_PHYS_DSGL_DCAID_V(0) | |
398 | CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents)); |
399 | phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; |
400 | phys_cpl->rss_hdr_int.qid = htons(qid); |
401 | phys_cpl->rss_hdr_int.hash_val = 0; |
402 | phys_cpl->rss_hdr_int.channel = pci_chan_id; |
403 | } |
404 | |
405 | static inline void dsgl_walk_add_page(struct dsgl_walk *walk, |
406 | size_t size, |
407 | dma_addr_t addr) |
408 | { |
409 | int j; |
410 | |
411 | if (!size) |
412 | return; |
413 | j = walk->nents; |
414 | walk->to->len[j % 8] = htons(size); |
415 | walk->to->addr[j % 8] = cpu_to_be64(addr); |
416 | j++; |
417 | if ((j % 8) == 0) |
418 | walk->to++; |
419 | walk->nents = j; |
420 | } |
421 | |
422 | static void dsgl_walk_add_sg(struct dsgl_walk *walk, |
423 | struct scatterlist *sg, |
424 | unsigned int slen, |
425 | unsigned int skip) |
426 | { |
427 | int skip_len = 0; |
428 | unsigned int left_size = slen, len = 0; |
429 | unsigned int j = walk->nents; |
430 | int offset, ent_len; |
431 | |
432 | if (!slen) |
433 | return; |
434 | while (sg && skip) { |
435 | if (sg_dma_len(sg) <= skip) { |
436 | skip -= sg_dma_len(sg); |
437 | skip_len = 0; |
438 | sg = sg_next(sg); |
439 | } else { |
440 | skip_len = skip; |
441 | skip = 0; |
442 | } |
443 | } |
444 | |
445 | while (left_size && sg) { |
446 | len = min_t(u32, left_size, sg_dma_len(sg) - skip_len); |
447 | offset = 0; |
448 | while (len) { |
449 | ent_len = min_t(u32, len, CHCR_DST_SG_SIZE); |
450 | walk->to->len[j % 8] = htons(ent_len); |
451 | walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) + |
452 | offset + skip_len); |
453 | offset += ent_len; |
454 | len -= ent_len; |
455 | j++; |
456 | if ((j % 8) == 0) |
457 | walk->to++; |
458 | } |
459 | walk->last_sg = sg; |
460 | walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) - |
461 | skip_len) + skip_len; |
462 | left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len); |
463 | skip_len = 0; |
464 | sg = sg_next(sg); |
465 | } |
466 | walk->nents = j; |
467 | } |
468 | |
469 | static inline void ulptx_walk_init(struct ulptx_walk *walk, |
470 | struct ulptx_sgl *ulp) |
471 | { |
472 | walk->sgl = ulp; |
473 | walk->nents = 0; |
474 | walk->pair_idx = 0; |
475 | walk->pair = ulp->sge; |
476 | walk->last_sg = NULL; |
477 | walk->last_sg_len = 0; |
478 | } |
479 | |
480 | static inline void ulptx_walk_end(struct ulptx_walk *walk) |
481 | { |
482 | walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | |
483 | ULPTX_NSGE_V(walk->nents)); |
484 | } |
485 | |
486 | |
487 | static inline void ulptx_walk_add_page(struct ulptx_walk *walk, |
488 | size_t size, |
489 | dma_addr_t addr) |
490 | { |
491 | if (!size) |
492 | return; |
493 | |
494 | if (walk->nents == 0) { |
495 | walk->sgl->len0 = cpu_to_be32(size); |
496 | walk->sgl->addr0 = cpu_to_be64(addr); |
497 | } else { |
498 | walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr); |
499 | walk->pair->len[walk->pair_idx] = cpu_to_be32(size); |
500 | walk->pair_idx = !walk->pair_idx; |
501 | if (!walk->pair_idx) |
502 | walk->pair++; |
503 | } |
504 | walk->nents++; |
505 | } |
506 | |
507 | static void ulptx_walk_add_sg(struct ulptx_walk *walk, |
508 | struct scatterlist *sg, |
509 | unsigned int len, |
510 | unsigned int skip) |
511 | { |
512 | int small; |
513 | int skip_len = 0; |
514 | unsigned int sgmin; |
515 | |
516 | if (!len) |
517 | return; |
518 | while (sg && skip) { |
519 | if (sg_dma_len(sg) <= skip) { |
520 | skip -= sg_dma_len(sg); |
521 | skip_len = 0; |
522 | sg = sg_next(sg); |
523 | } else { |
524 | skip_len = skip; |
525 | skip = 0; |
526 | } |
527 | } |
	WARN(!sg, "SG should not be null here\n");
529 | if (sg && (walk->nents == 0)) { |
530 | small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); |
531 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); |
532 | walk->sgl->len0 = cpu_to_be32(sgmin); |
533 | walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len); |
534 | walk->nents++; |
535 | len -= sgmin; |
536 | walk->last_sg = sg; |
537 | walk->last_sg_len = sgmin + skip_len; |
538 | skip_len += sgmin; |
539 | if (sg_dma_len(sg) == skip_len) { |
540 | sg = sg_next(sg); |
541 | skip_len = 0; |
542 | } |
543 | } |
544 | |
545 | while (sg && len) { |
546 | small = min(sg_dma_len(sg) - skip_len, len); |
547 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); |
548 | walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin); |
549 | walk->pair->addr[walk->pair_idx] = |
550 | cpu_to_be64(sg_dma_address(sg) + skip_len); |
551 | walk->pair_idx = !walk->pair_idx; |
552 | walk->nents++; |
553 | if (!walk->pair_idx) |
554 | walk->pair++; |
555 | len -= sgmin; |
556 | skip_len += sgmin; |
557 | walk->last_sg = sg; |
558 | walk->last_sg_len = skip_len; |
559 | if (sg_dma_len(sg) == skip_len) { |
560 | sg = sg_next(sg); |
561 | skip_len = 0; |
562 | } |
563 | } |
564 | } |
565 | |
566 | static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm) |
567 | { |
568 | struct skcipher_alg *alg = crypto_skcipher_alg(tfm); |
569 | struct chcr_alg_template *chcr_crypto_alg = |
570 | container_of(alg, struct chcr_alg_template, alg.skcipher); |
571 | |
572 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; |
573 | } |
574 | |
575 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) |
576 | { |
577 | struct adapter *adap = netdev2adap(dev); |
578 | struct sge_uld_txq_info *txq_info = |
579 | adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; |
580 | struct sge_uld_txq *txq; |
581 | int ret = 0; |
582 | |
583 | local_bh_disable(); |
584 | txq = &txq_info->uldtxq[idx]; |
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
589 | local_bh_enable(); |
590 | return ret; |
591 | } |
592 | |
593 | static int generate_copy_rrkey(struct ablk_ctx *ablkctx, |
594 | struct _key_ctx *key_ctx) |
595 | { |
596 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { |
597 | memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len); |
598 | } else { |
599 | memcpy(key_ctx->key, |
600 | ablkctx->key + (ablkctx->enckey_len >> 1), |
601 | ablkctx->enckey_len >> 1); |
602 | memcpy(key_ctx->key + (ablkctx->enckey_len >> 1), |
603 | ablkctx->rrkey, ablkctx->enckey_len >> 1); |
604 | } |
605 | return 0; |
606 | } |
607 | |
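/*
 * chcr_hash_ent_in_wr() and chcr_sg_ent_in_wr() walk the DMA-mapped
 * scatterlist(s) and return how many payload bytes still fit in the
 * remaining WR @space, charging the per-entry SGL overhead via
 * sgl_ent_len[] and dsgl_ent_len[]; the result bounds the bytes carried
 * by a single work request.
 */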
608 | static int chcr_hash_ent_in_wr(struct scatterlist *src, |
609 | unsigned int minsg, |
610 | unsigned int space, |
611 | unsigned int srcskip) |
612 | { |
613 | int srclen = 0; |
614 | int srcsg = minsg; |
615 | int soffset = 0, sless; |
616 | |
617 | if (sg_dma_len(src) == srcskip) { |
618 | src = sg_next(src); |
619 | srcskip = 0; |
620 | } |
621 | while (src && space > (sgl_ent_len[srcsg + 1])) { |
622 | sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip, |
623 | CHCR_SRC_SG_SIZE); |
624 | srclen += sless; |
625 | soffset += sless; |
626 | srcsg++; |
627 | if (sg_dma_len(src) == (soffset + srcskip)) { |
628 | src = sg_next(src); |
629 | soffset = 0; |
630 | srcskip = 0; |
631 | } |
632 | } |
633 | return srclen; |
634 | } |
635 | |
636 | static int chcr_sg_ent_in_wr(struct scatterlist *src, |
637 | struct scatterlist *dst, |
638 | unsigned int minsg, |
639 | unsigned int space, |
640 | unsigned int srcskip, |
641 | unsigned int dstskip) |
642 | { |
643 | int srclen = 0, dstlen = 0; |
644 | int srcsg = minsg, dstsg = minsg; |
645 | int offset = 0, soffset = 0, less, sless = 0; |
646 | |
647 | if (sg_dma_len(src) == srcskip) { |
648 | src = sg_next(src); |
649 | srcskip = 0; |
650 | } |
651 | if (sg_dma_len(dst) == dstskip) { |
652 | dst = sg_next(dst); |
653 | dstskip = 0; |
654 | } |
655 | |
656 | while (src && dst && |
657 | space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { |
658 | sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset, |
659 | CHCR_SRC_SG_SIZE); |
660 | srclen += sless; |
661 | srcsg++; |
662 | offset = 0; |
663 | while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && |
664 | space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { |
665 | if (srclen <= dstlen) |
666 | break; |
667 | less = min_t(unsigned int, sg_dma_len(dst) - offset - |
668 | dstskip, CHCR_DST_SG_SIZE); |
669 | dstlen += less; |
670 | offset += less; |
671 | if ((offset + dstskip) == sg_dma_len(dst)) { |
672 | dst = sg_next(dst); |
673 | offset = 0; |
674 | } |
675 | dstsg++; |
676 | dstskip = 0; |
677 | } |
678 | soffset += sless; |
679 | if ((soffset + srcskip) == sg_dma_len(src)) { |
680 | src = sg_next(src); |
681 | srcskip = 0; |
682 | soffset = 0; |
683 | } |
684 | |
685 | } |
686 | return min(srclen, dstlen); |
687 | } |
688 | |
689 | static int chcr_cipher_fallback(struct crypto_skcipher *cipher, |
690 | struct skcipher_request *req, |
691 | u8 *iv, |
692 | unsigned short op_type) |
693 | { |
694 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
695 | int err; |
696 | |
	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}
709 | |
710 | static inline int get_qidxs(struct crypto_async_request *req, |
711 | unsigned int *txqidx, unsigned int *rxqidx) |
712 | { |
713 | struct crypto_tfm *tfm = req->tfm; |
714 | int ret = 0; |
715 | |
716 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
717 | case CRYPTO_ALG_TYPE_AEAD: |
718 | { |
719 | struct aead_request *aead_req = |
720 | container_of(req, struct aead_request, base); |
		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
722 | *txqidx = reqctx->txqidx; |
723 | *rxqidx = reqctx->rxqidx; |
724 | break; |
725 | } |
726 | case CRYPTO_ALG_TYPE_SKCIPHER: |
727 | { |
728 | struct skcipher_request *sk_req = |
729 | container_of(req, struct skcipher_request, base); |
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);
732 | *txqidx = reqctx->txqidx; |
733 | *rxqidx = reqctx->rxqidx; |
734 | break; |
735 | } |
736 | case CRYPTO_ALG_TYPE_AHASH: |
737 | { |
738 | struct ahash_request *ahash_req = |
739 | container_of(req, struct ahash_request, base); |
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);
742 | *txqidx = reqctx->txqidx; |
743 | *rxqidx = reqctx->rxqidx; |
744 | break; |
745 | } |
746 | default: |
747 | ret = -EINVAL; |
748 | /* should never get here */ |
749 | BUG(); |
750 | break; |
751 | } |
752 | return ret; |
753 | } |
754 | |
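/*
 * Fill the FW_CRYPTO_LOOKASIDE_WR header common to the cipher, AEAD and
 * hash paths: queue/channel routing derived from the per-request queue
 * indices, the WR and ULPTX lengths in 16-byte units, and the request
 * pointer echoed back as the completion cookie.
 */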
755 | static inline void create_wreq(struct chcr_context *ctx, |
756 | struct chcr_wr *chcr_req, |
757 | struct crypto_async_request *req, |
758 | unsigned int imm, |
759 | int hash_sz, |
760 | unsigned int len16, |
761 | unsigned int sc_len, |
762 | unsigned int lcb) |
763 | { |
764 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
765 | unsigned int tx_channel_id, rx_channel_id; |
766 | unsigned int txqidx = 0, rxqidx = 0; |
767 | unsigned int qid, fid, portno; |
768 | |
	get_qidxs(req, &txqidx, &rxqidx);
770 | qid = u_ctx->lldi.rxq_ids[rxqidx]; |
771 | fid = u_ctx->lldi.rxq_ids[0]; |
772 | portno = rxqidx / ctx->rxq_perchan; |
773 | tx_channel_id = txqidx / ctx->txq_perchan; |
774 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]); |
775 | |
776 | |
777 | chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; |
778 | chcr_req->wreq.pld_size_hash_size = |
779 | htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); |
780 | chcr_req->wreq.len16_pkd = |
781 | htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); |
782 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); |
783 | chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid, |
784 | !!lcb, txqidx); |
785 | |
786 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid); |
787 | chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - |
788 | ((sizeof(chcr_req->wreq)) >> 4))); |
789 | chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); |
790 | chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + |
791 | sizeof(chcr_req->key_ctx) + sc_len); |
792 | } |
793 | |
794 | /** |
795 | * create_cipher_wr - form the WR for cipher operations |
796 | * @wrparam: Container for create_cipher_wr()'s parameters |
797 | */ |
798 | static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) |
799 | { |
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
801 | struct chcr_context *ctx = c_ctx(tfm); |
802 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
803 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
804 | struct sk_buff *skb = NULL; |
805 | struct chcr_wr *chcr_req; |
806 | struct cpl_rx_phys_dsgl *phys_cpl; |
807 | struct ulptx_sgl *ulptx; |
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
810 | unsigned int temp = 0, transhdr_len, dst_size; |
811 | int error; |
812 | int nents; |
813 | unsigned int kctx_len; |
814 | gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
815 | GFP_KERNEL : GFP_ATOMIC; |
	struct adapter *adap = padap(ctx->dev);
817 | unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
818 | |
819 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
823 | kctx_len = roundup(ablkctx->enckey_len, 16); |
824 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
829 | transhdr_len += temp; |
830 | transhdr_len = roundup(transhdr_len, 16); |
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
832 | if (!skb) { |
833 | error = -ENOMEM; |
834 | goto err; |
835 | } |
	chcr_req = __skb_put_zero(skb, transhdr_len);
837 | chcr_req->sec_cpl.op_ivinsrtofst = |
838 | FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); |
839 | |
840 | chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); |
841 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
842 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0); |
843 | |
844 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
845 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); |
846 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, |
847 | ablkctx->ciph_mode, |
848 | 0, 0, IV >> 1); |
849 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, |
850 | 0, 1, dst_size); |
851 | |
852 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; |
853 | if ((reqctx->op == CHCR_DECRYPT_OP) && |
854 | (!(get_cryptoalg_subtype(tfm) == |
855 | CRYPTO_ALG_SUB_TYPE_CTR)) && |
856 | (!(get_cryptoalg_subtype(tfm) == |
857 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) { |
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
859 | } else { |
860 | if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) || |
861 | (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) { |
862 | memcpy(chcr_req->key_ctx.key, ablkctx->key, |
863 | ablkctx->enckey_len); |
864 | } else { |
865 | memcpy(chcr_req->key_ctx.key, ablkctx->key + |
866 | (ablkctx->enckey_len >> 1), |
867 | ablkctx->enckey_len >> 1); |
868 | memcpy(chcr_req->key_ctx.key + |
869 | (ablkctx->enckey_len >> 1), |
870 | ablkctx->key, |
871 | ablkctx->enckey_len >> 1); |
872 | } |
873 | } |
874 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
875 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
878 | |
	atomic_inc(&adap->chcr_stats.cipher_rqst);
880 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV |
881 | + (reqctx->imm ? (wrparam->bytes) : 0); |
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
885 | reqctx->skb = skb; |
886 | |
887 | if (reqctx->op && (ablkctx->ciph_mode == |
888 | CHCR_SCMD_CIPHER_MODE_AES_CBC)) |
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
892 | |
893 | return skb; |
894 | err: |
895 | return ERR_PTR(error); |
896 | } |
897 | |
898 | static inline int chcr_keyctx_ck_size(unsigned int keylen) |
899 | { |
900 | int ck_size = 0; |
901 | |
902 | if (keylen == AES_KEYSIZE_128) |
903 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
904 | else if (keylen == AES_KEYSIZE_192) |
905 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; |
906 | else if (keylen == AES_KEYSIZE_256) |
907 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
908 | else |
909 | ck_size = 0; |
910 | |
911 | return ck_size; |
912 | } |
913 | static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, |
914 | const u8 *key, |
915 | unsigned int keylen) |
916 | { |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
918 | |
	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
924 | } |
925 | |
926 | static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, |
927 | const u8 *key, |
928 | unsigned int keylen) |
929 | { |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
931 | unsigned int ck_size, context_size; |
932 | u16 alignment = 0; |
933 | int err; |
934 | |
935 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); |
936 | if (err) |
937 | goto badkey_err; |
938 | |
939 | ck_size = chcr_keyctx_ck_size(keylen); |
940 | alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0; |
941 | memcpy(ablkctx->key, key, keylen); |
942 | ablkctx->enckey_len = keylen; |
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
944 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + |
945 | keylen + alignment) >> 4; |
946 | |
947 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, |
948 | 0, 0, context_size); |
949 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; |
950 | return 0; |
951 | badkey_err: |
952 | ablkctx->enckey_len = 0; |
953 | |
954 | return err; |
955 | } |
956 | |
957 | static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher, |
958 | const u8 *key, |
959 | unsigned int keylen) |
960 | { |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
962 | unsigned int ck_size, context_size; |
963 | u16 alignment = 0; |
964 | int err; |
965 | |
966 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); |
967 | if (err) |
968 | goto badkey_err; |
969 | ck_size = chcr_keyctx_ck_size(keylen); |
970 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; |
971 | memcpy(ablkctx->key, key, keylen); |
972 | ablkctx->enckey_len = keylen; |
973 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + |
974 | keylen + alignment) >> 4; |
975 | |
976 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, |
977 | 0, 0, context_size); |
978 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; |
979 | |
980 | return 0; |
981 | badkey_err: |
982 | ablkctx->enckey_len = 0; |
983 | |
984 | return err; |
985 | } |
986 | |
987 | static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher, |
988 | const u8 *key, |
989 | unsigned int keylen) |
990 | { |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
992 | unsigned int ck_size, context_size; |
993 | u16 alignment = 0; |
994 | int err; |
995 | |
996 | if (keylen < CTR_RFC3686_NONCE_SIZE) |
997 | return -EINVAL; |
998 | memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), |
999 | CTR_RFC3686_NONCE_SIZE); |
1000 | |
1001 | keylen -= CTR_RFC3686_NONCE_SIZE; |
1002 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); |
1003 | if (err) |
1004 | goto badkey_err; |
1005 | |
1006 | ck_size = chcr_keyctx_ck_size(keylen); |
1007 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; |
1008 | memcpy(ablkctx->key, key, keylen); |
1009 | ablkctx->enckey_len = keylen; |
1010 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + |
1011 | keylen + alignment) >> 4; |
1012 | |
1013 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, |
1014 | 0, 0, context_size); |
1015 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; |
1016 | |
1017 | return 0; |
1018 | badkey_err: |
1019 | ablkctx->enckey_len = 0; |
1020 | |
1021 | return err; |
1022 | } |
1023 | static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add) |
1024 | { |
1025 | unsigned int size = AES_BLOCK_SIZE; |
1026 | __be32 *b = (__be32 *)(dstiv + size); |
1027 | u32 c, prev; |
1028 | |
1029 | memcpy(dstiv, srciv, AES_BLOCK_SIZE); |
1030 | for (; size >= 4; size -= 4) { |
1031 | prev = be32_to_cpu(*--b); |
1032 | c = prev + add; |
1033 | *b = cpu_to_be32(c); |
1034 | if (prev < c) |
1035 | break; |
1036 | add = 1; |
1037 | } |
1038 | |
1039 | } |
1040 | |
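/*
 * Clamp @bytes so the low 32-bit word of the big-endian CTR counter in
 * @iv cannot wrap within this request: if that word currently holds t,
 * only c = 2^32 - t blocks (c * AES_BLOCK_SIZE bytes) may be processed
 * before the caller must reissue with an updated IV.
 */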
1041 | static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) |
1042 | { |
1043 | __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE); |
1044 | u64 c; |
1045 | u32 temp = be32_to_cpu(*--b); |
1046 | |
1047 | temp = ~temp; |
	c = (u64)temp + 1; /* number of blocks processable before overflow */
1049 | if ((bytes / AES_BLOCK_SIZE) >= c) |
1050 | bytes = c * AES_BLOCK_SIZE; |
1051 | return bytes; |
1052 | } |
1053 | |
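/*
 * Recompute the XTS tweak after last_req_len bytes: encrypt the saved
 * IV with the tweak key (second half of ablkctx->key), multiply by
 * x^round in GF(2^128) (one x^8 step per eight blocks, then single x
 * steps), and, when more data remains, decrypt back so the next WR can
 * start again from a plain IV.
 */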
1054 | static int chcr_update_tweak(struct skcipher_request *req, u8 *iv, |
1055 | u32 isfinal) |
1056 | { |
1057 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1059 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
1060 | struct crypto_aes_ctx aes; |
1061 | int ret, i; |
1062 | u8 *key; |
1063 | unsigned int keylen; |
1064 | int round = reqctx->last_req_len / AES_BLOCK_SIZE; |
1065 | int round8 = round / 8; |
1066 | |
1067 | memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); |
1068 | |
1069 | keylen = ablkctx->enckey_len / 2; |
1070 | key = ablkctx->key + keylen; |
	/* For a 192 bit key remove the padded zeroes which were
	 * added in chcr_xts_setkey
	 */
1074 | if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr)) |
1075 | == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) |
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
1079 | if (ret) |
1080 | return ret; |
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1087 | |
	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
1092 | return 0; |
1093 | } |
1094 | |
1095 | static int chcr_update_cipher_iv(struct skcipher_request *req, |
1096 | struct cpl_fw6_pld *fw6_pld, u8 *iv) |
1097 | { |
1098 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1099 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
1100 | int subtype = get_cryptoalg_subtype(tfm); |
1101 | int ret = 0; |
1102 | |
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
1112 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { |
1113 | if (reqctx->op) |
1114 | /*Updated before sending last WR*/ |
1115 | memcpy(iv, req->iv, AES_BLOCK_SIZE); |
1116 | else |
1117 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); |
1118 | } |
1119 | |
1120 | return ret; |
1121 | |
1122 | } |
1123 | |
/* We need a separate function for the final IV because for RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
1128 | |
1129 | static int chcr_final_cipher_iv(struct skcipher_request *req, |
1130 | struct cpl_fw6_pld *fw6_pld, u8 *iv) |
1131 | { |
1132 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1133 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
1134 | int subtype = get_cryptoalg_subtype(tfm); |
1135 | int ret = 0; |
1136 | |
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
1140 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) { |
1141 | if (!reqctx->partial_req) |
1142 | memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); |
1143 | else |
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1147 | /*Already updated for Decrypt*/ |
1148 | if (!reqctx->op) |
1149 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); |
1150 | |
1151 | } |
1152 | return ret; |
1153 | |
1154 | } |
1155 | |
1156 | static int chcr_handle_cipher_resp(struct skcipher_request *req, |
1157 | unsigned char *input, int err) |
1158 | { |
1159 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
1160 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1161 | struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1164 | struct chcr_dev *dev = c_ctx(tfm)->dev; |
1165 | struct chcr_context *ctx = c_ctx(tfm); |
	struct adapter *adap = padap(ctx->dev);
1167 | struct cipher_wr_param wrparam; |
1168 | struct sk_buff *skb; |
1169 | int bytes; |
1170 | |
1171 | if (err) |
1172 | goto unmap; |
1173 | if (req->cryptlen == reqctx->processed) { |
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1177 | goto complete; |
1178 | } |
1179 | |
1180 | if (!reqctx->imm) { |
1181 | bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0, |
1182 | CIP_SPACE_LEFT(ablkctx->enckey_len), |
1183 | reqctx->src_ofst, reqctx->dst_ofst); |
1184 | if ((bytes + reqctx->processed) >= req->cryptlen) |
1185 | bytes = req->cryptlen - reqctx->processed; |
1186 | else |
1187 | bytes = rounddown(bytes, 16); |
1188 | } else { |
		/* CTR mode counter overflow */
1190 | bytes = req->cryptlen - reqctx->processed; |
1191 | } |
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1193 | if (err) |
1194 | goto unmap; |
1195 | |
1196 | if (unlikely(bytes == 0)) { |
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
1199 | memcpy(req->iv, reqctx->init_iv, IV); |
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
1203 | goto complete; |
1204 | } |
1205 | |
1206 | if (get_cryptoalg_subtype(tfm) == |
1207 | CRYPTO_ALG_SUB_TYPE_CTR) |
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1209 | wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx]; |
1210 | wrparam.req = req; |
1211 | wrparam.bytes = bytes; |
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
1216 | goto unmap; |
1217 | } |
1218 | skb->dev = u_ctx->lldi.ports[0]; |
1219 | set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); |
1220 | chcr_send_wr(skb); |
1221 | reqctx->last_req_len = bytes; |
1222 | reqctx->processed += bytes; |
1223 | if (get_cryptoalg_subtype(tfm) == |
1224 | CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == |
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
1226 | complete(&ctx->cbc_aes_aio_done); |
1227 | } |
1228 | return 0; |
1229 | unmap: |
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1231 | complete: |
1232 | if (get_cryptoalg_subtype(tfm) == |
1233 | CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == |
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
1235 | complete(&ctx->cbc_aes_aio_done); |
1236 | } |
1237 | chcr_dec_wrcount(dev); |
1238 | skcipher_request_complete(req, err); |
1239 | return err; |
1240 | } |
1241 | |
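/*
 * Build the first work request for a cipher transfer. Payloads small
 * enough for SGE_MAX_WR_LEN are sent inline (reqctx->imm); larger ones
 * are described by SG entries and may span several WRs, with the
 * remainder continued from chcr_handle_cipher_resp().
 */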
1242 | static int process_cipher(struct skcipher_request *req, |
1243 | unsigned short qid, |
1244 | struct sk_buff **skb, |
1245 | unsigned short op_type) |
1246 | { |
1247 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
1248 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1249 | unsigned int ivsize = crypto_skcipher_ivsize(tfm); |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
1252 | struct cipher_wr_param wrparam; |
1253 | int bytes, err = -EINVAL; |
1254 | int subtype; |
1255 | |
1256 | reqctx->processed = 0; |
1257 | reqctx->partial_req = 0; |
1258 | if (!req->iv) |
1259 | goto error; |
1260 | subtype = get_cryptoalg_subtype(tfm); |
1261 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || |
1262 | (req->cryptlen == 0) || |
1263 | (req->cryptlen % crypto_skcipher_blocksize(tfm))) { |
1264 | if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS) |
1265 | goto fallback; |
1266 | else if (req->cryptlen % crypto_skcipher_blocksize(tfm) && |
1267 | subtype == CRYPTO_ALG_SUB_TYPE_XTS) |
1268 | goto fallback; |
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1270 | ablkctx->enckey_len, req->cryptlen, ivsize); |
1271 | goto error; |
1272 | } |
1273 | |
	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1275 | if (err) |
1276 | goto error; |
1277 | if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + |
1278 | AES_MIN_KEY_SIZE + |
1279 | sizeof(struct cpl_rx_phys_dsgl) + |
1280 | /*Min dsgl size*/ |
1281 | 32))) { |
1282 | /* Can be sent as Imm*/ |
1283 | unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len; |
1284 | |
		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
1288 | kctx_len = roundup(ablkctx->enckey_len, 16); |
1289 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); |
1290 | reqctx->imm = (transhdr_len + IV + req->cryptlen) <= |
1291 | SGE_MAX_WR_LEN; |
1292 | bytes = IV + req->cryptlen; |
1293 | |
1294 | } else { |
1295 | reqctx->imm = 0; |
1296 | } |
1297 | |
1298 | if (!reqctx->imm) { |
1299 | bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0, |
1300 | CIP_SPACE_LEFT(ablkctx->enckey_len), |
1301 | 0, 0); |
1302 | if ((bytes + reqctx->processed) >= req->cryptlen) |
1303 | bytes = req->cryptlen - reqctx->processed; |
1304 | else |
1305 | bytes = rounddown(bytes, 16); |
1306 | } else { |
1307 | bytes = req->cryptlen; |
1308 | } |
1309 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) { |
		bytes = adjust_ctr_overflow(req->iv, bytes);
1311 | } |
1312 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) { |
1313 | memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE); |
1314 | memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, |
1315 | CTR_RFC3686_IV_SIZE); |
1316 | |
1317 | /* initialize counter portion of counter block */ |
1318 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + |
1319 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); |
1320 | memcpy(reqctx->init_iv, reqctx->iv, IV); |
1321 | |
1322 | } else { |
1323 | |
1324 | memcpy(reqctx->iv, req->iv, IV); |
1325 | memcpy(reqctx->init_iv, req->iv, IV); |
1326 | } |
1327 | if (unlikely(bytes == 0)) { |
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
1336 | goto error; |
1337 | } |
1338 | reqctx->op = op_type; |
1339 | reqctx->srcsg = req->src; |
1340 | reqctx->dstsg = req->dst; |
1341 | reqctx->src_ofst = 0; |
1342 | reqctx->dst_ofst = 0; |
1343 | wrparam.qid = qid; |
1344 | wrparam.req = req; |
1345 | wrparam.bytes = bytes; |
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
1349 | goto unmap; |
1350 | } |
1351 | reqctx->processed = bytes; |
1352 | reqctx->last_req_len = bytes; |
1353 | reqctx->partial_req = !!(req->cryptlen - reqctx->processed); |
1354 | |
1355 | return 0; |
1356 | unmap: |
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1358 | error: |
1359 | return err; |
1360 | } |
1361 | |
1362 | static int chcr_aes_encrypt(struct skcipher_request *req) |
1363 | { |
1364 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1365 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
1366 | struct chcr_dev *dev = c_ctx(tfm)->dev; |
1367 | struct sk_buff *skb = NULL; |
1368 | int err; |
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1370 | struct chcr_context *ctx = c_ctx(tfm); |
1371 | unsigned int cpu; |
1372 | |
1373 | cpu = get_cpu(); |
1374 | reqctx->txqidx = cpu % ctx->ntxq; |
1375 | reqctx->rxqidx = cpu % ctx->nrxq; |
1376 | put_cpu(); |
1377 | |
1378 | err = chcr_inc_wrcount(dev); |
1379 | if (err) |
1380 | return -ENXIO; |
1381 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1382 | reqctx->txqidx) && |
1383 | (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { |
1384 | err = -ENOSPC; |
1385 | goto error; |
1386 | } |
1387 | |
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
1390 | if (err || !skb) |
1391 | return err; |
1392 | skb->dev = u_ctx->lldi.ports[0]; |
1393 | set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); |
1394 | chcr_send_wr(skb); |
1395 | if (get_cryptoalg_subtype(tfm) == |
1396 | CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == |
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
1398 | reqctx->partial_req = 1; |
1399 | wait_for_completion(&ctx->cbc_aes_aio_done); |
1400 | } |
1401 | return -EINPROGRESS; |
1402 | error: |
1403 | chcr_dec_wrcount(dev); |
1404 | return err; |
1405 | } |
1406 | |
1407 | static int chcr_aes_decrypt(struct skcipher_request *req) |
1408 | { |
1409 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1410 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1412 | struct chcr_dev *dev = c_ctx(tfm)->dev; |
1413 | struct sk_buff *skb = NULL; |
1414 | int err; |
1415 | struct chcr_context *ctx = c_ctx(tfm); |
1416 | unsigned int cpu; |
1417 | |
1418 | cpu = get_cpu(); |
1419 | reqctx->txqidx = cpu % ctx->ntxq; |
1420 | reqctx->rxqidx = cpu % ctx->nrxq; |
1421 | put_cpu(); |
1422 | |
1423 | err = chcr_inc_wrcount(dev); |
1424 | if (err) |
1425 | return -ENXIO; |
1426 | |
1427 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1428 | reqctx->txqidx) && |
1429 | (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) |
1430 | return -ENOSPC; |
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
1433 | if (err || !skb) |
1434 | return err; |
1435 | skb->dev = u_ctx->lldi.ports[0]; |
1436 | set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); |
1437 | chcr_send_wr(skb); |
1438 | return -EINPROGRESS; |
1439 | } |
1440 | static int chcr_device_init(struct chcr_context *ctx) |
1441 | { |
1442 | struct uld_ctx *u_ctx = NULL; |
1443 | int txq_perchan, ntxq; |
1444 | int err = 0, rxq_perchan; |
1445 | |
1446 | if (!ctx->dev) { |
1447 | u_ctx = assign_chcr_device(); |
1448 | if (!u_ctx) { |
1449 | err = -ENXIO; |
			pr_err("chcr device assignment fails\n");
1451 | goto out; |
1452 | } |
1453 | ctx->dev = &u_ctx->dev; |
1454 | ntxq = u_ctx->lldi.ntxq; |
1455 | rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; |
1456 | txq_perchan = ntxq / u_ctx->lldi.nchan; |
1457 | ctx->ntxq = ntxq; |
1458 | ctx->nrxq = u_ctx->lldi.nrxq; |
1459 | ctx->rxq_perchan = rxq_perchan; |
1460 | ctx->txq_perchan = txq_perchan; |
1461 | } |
1462 | out: |
1463 | return err; |
1464 | } |
1465 | |
1466 | static int chcr_init_tfm(struct crypto_skcipher *tfm) |
1467 | { |
1468 | struct skcipher_alg *alg = crypto_skcipher_alg(tfm); |
1469 | struct chcr_context *ctx = crypto_skcipher_ctx(tfm); |
1470 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
1471 | |
	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
1477 | } |
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1481 | |
1482 | return chcr_device_init(ctx); |
1483 | } |
1484 | |
1485 | static int chcr_rfc3686_init(struct crypto_skcipher *tfm) |
1486 | { |
1487 | struct skcipher_alg *alg = crypto_skcipher_alg(tfm); |
1488 | struct chcr_context *ctx = crypto_skcipher_ctx(tfm); |
1489 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
1490 | |
	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp()
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
1499 | } |
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1502 | return chcr_device_init(ctx); |
1503 | } |
1504 | |
1505 | |
1506 | static void chcr_exit_tfm(struct crypto_skcipher *tfm) |
1507 | { |
1508 | struct chcr_context *ctx = crypto_skcipher_ctx(tfm); |
1509 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
1510 | |
	crypto_free_skcipher(ablkctx->sw_cipher);
1512 | } |
1513 | |
1514 | static int get_alg_config(struct algo_param *params, |
1515 | unsigned int auth_size) |
1516 | { |
1517 | switch (auth_size) { |
1518 | case SHA1_DIGEST_SIZE: |
1519 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; |
1520 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; |
1521 | params->result_size = SHA1_DIGEST_SIZE; |
1522 | break; |
1523 | case SHA224_DIGEST_SIZE: |
1524 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; |
1525 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; |
1526 | params->result_size = SHA256_DIGEST_SIZE; |
1527 | break; |
1528 | case SHA256_DIGEST_SIZE: |
1529 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; |
1530 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; |
1531 | params->result_size = SHA256_DIGEST_SIZE; |
1532 | break; |
1533 | case SHA384_DIGEST_SIZE: |
1534 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; |
1535 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; |
1536 | params->result_size = SHA512_DIGEST_SIZE; |
1537 | break; |
1538 | case SHA512_DIGEST_SIZE: |
1539 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; |
1540 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; |
1541 | params->result_size = SHA512_DIGEST_SIZE; |
1542 | break; |
1543 | default: |
		pr_err("ERROR, unsupported digest size\n");
1545 | return -EINVAL; |
1546 | } |
1547 | return 0; |
1548 | } |
1549 | |
1550 | static inline void chcr_free_shash(struct crypto_shash *base_hash) |
1551 | { |
	crypto_free_shash(base_hash);
1553 | } |
1554 | |
1555 | /** |
1556 | * create_hash_wr - Create hash work request |
 * @req: Hash request base
1558 | * @param: Container for create_hash_wr()'s parameters |
1559 | */ |
1560 | static struct sk_buff *create_hash_wr(struct ahash_request *req, |
1561 | struct hash_wr_param *param) |
1562 | { |
1563 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1564 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
1565 | struct chcr_context *ctx = h_ctx(tfm); |
1566 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); |
1567 | struct sk_buff *skb = NULL; |
1568 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
1569 | struct chcr_wr *chcr_req; |
1570 | struct ulptx_sgl *ulptx; |
1571 | unsigned int nents = 0, transhdr_len; |
1572 | unsigned int temp = 0; |
1573 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1574 | GFP_ATOMIC; |
	struct adapter *adap = padap(h_ctx(tfm)->dev);
1576 | int error = 0; |
1577 | unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan; |
1578 | |
1579 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
1580 | transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len); |
1581 | req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + |
1582 | param->sg_len) <= SGE_MAX_WR_LEN; |
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
1588 | transhdr_len = roundup(transhdr_len, 16); |
1589 | |
	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);
1594 | |
1595 | chcr_req->sec_cpl.op_ivinsrtofst = |
1596 | FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0); |
1597 | |
1598 | chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); |
1599 | |
1600 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
1601 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); |
1602 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
1603 | FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); |
1604 | chcr_req->sec_cpl.seqno_numivs = |
1605 | FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, |
1606 | param->opad_needed, 0); |
1607 | |
1608 | chcr_req->sec_cpl.ivgen_hdrlen = |
1609 | FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); |
1610 | |
1611 | memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash, |
1612 | param->alg_prm.result_size); |
1613 | |
1614 | if (param->opad_needed) |
1615 | memcpy(chcr_req->key_ctx.key + |
1616 | ((param->alg_prm.result_size <= 32) ? 32 : |
1617 | CHCR_HASH_MAX_DIGEST_SIZE), |
1618 | hmacctx->opad, param->alg_prm.result_size); |
1619 | |
1620 | chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, |
1621 | param->alg_prm.mk_size, 0, |
1622 | param->opad_needed, |
1623 | ((param->kctx_len + |
1624 | sizeof(chcr_req->key_ctx)) >> 4)); |
1625 | chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); |
1626 | ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len + |
1627 | DUMMY_BYTES); |
1628 | if (param->bfr_len != 0) { |
1629 | req_ctx->hctx_wr.dma_addr = |
1630 | dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr, |
1631 | param->bfr_len, DMA_TO_DEVICE); |
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1634 | error = -ENOMEM; |
1635 | goto err; |
1636 | } |
1637 | req_ctx->hctx_wr.dma_len = param->bfr_len; |
1638 | } else { |
1639 | req_ctx->hctx_wr.dma_addr = 0; |
1640 | } |
1641 | chcr_add_hash_src_ent(req, ulptx, param); |
	/* Request up to max WR size */
1643 | temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ? |
1644 | (param->sg_len + param->bfr_len) : 0); |
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
1649 | req_ctx->hctx_wr.skb = skb; |
1650 | return skb; |
1651 | err: |
1652 | kfree_skb(skb); |
1653 | return ERR_PTR(error); |
1654 | } |
1655 | |
1656 | static int chcr_ahash_update(struct ahash_request *req) |
1657 | { |
1658 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1659 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1663 | struct sk_buff *skb; |
1664 | u8 remainder = 0, bs; |
1665 | unsigned int nbytes = req->nbytes; |
1666 | struct hash_wr_param params; |
1667 | int error; |
1668 | unsigned int cpu; |
1669 | |
1670 | cpu = get_cpu(); |
1671 | req_ctx->txqidx = cpu % ctx->ntxq; |
1672 | req_ctx->rxqidx = cpu % ctx->nrxq; |
1673 | put_cpu(); |
1674 | |
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1676 | |
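	/* The hardware hashes whole blocks only: anything short of a block
	 * boundary is buffered in reqbfr and deferred to a later update.
	 */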
1677 | if (nbytes + req_ctx->reqlen >= bs) { |
1678 | remainder = (nbytes + req_ctx->reqlen) % bs; |
1679 | nbytes = nbytes + req_ctx->reqlen - remainder; |
1680 | } else { |
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
1683 | req_ctx->reqlen += nbytes; |
1684 | return 0; |
1685 | } |
1686 | error = chcr_inc_wrcount(dev); |
1687 | if (error) |
1688 | return -ENXIO; |
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * the inflight count for dev guarantees that lldi and padap remain
	 * valid.
	 */
1692 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1693 | req_ctx->txqidx) && |
1694 | (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { |
1695 | error = -ENOSPC; |
1696 | goto err; |
1697 | } |
1698 | |
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1701 | if (error) { |
1702 | error = -ENOMEM; |
1703 | goto err; |
1704 | } |
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706 | params.kctx_len = roundup(params.alg_prm.result_size, 16); |
1707 | params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, |
1708 | HASH_SPACE_LEFT(params.kctx_len), 0); |
1709 | if (params.sg_len > req->nbytes) |
1710 | params.sg_len = req->nbytes; |
1711 | params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) - |
1712 | req_ctx->reqlen; |
1713 | params.opad_needed = 0; |
1714 | params.more = 1; |
1715 | params.last = 0; |
1716 | params.bfr_len = req_ctx->reqlen; |
1717 | params.scmd1 = 0; |
1718 | req_ctx->hctx_wr.srcsg = req->src; |
1719 | |
1720 | params.hash_size = params.alg_prm.result_size; |
1721 | req_ctx->data_len += params.sg_len + params.bfr_len; |
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
1725 | goto unmap; |
1726 | } |
1727 | |
1728 | req_ctx->hctx_wr.processed += params.sg_len; |
1729 | if (remainder) { |
1730 | /* Swap buffers */ |
1731 | swap(req_ctx->reqbfr, req_ctx->skbfr); |
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
1735 | } |
1736 | req_ctx->reqlen = remainder; |
1737 | skb->dev = u_ctx->lldi.ports[0]; |
1738 | set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); |
1739 | chcr_send_wr(skb); |
1740 | return -EINPROGRESS; |
1741 | unmap: |
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1743 | err: |
1744 | chcr_dec_wrcount(dev); |
1745 | return error; |
1746 | } |
1747 | |
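/* Hand-build the final MD-style padding block: a 0x80 byte, zeros, then the
 * message length in bits stored big-endian in the last eight bytes (offset
 * 56 for 64-byte blocks, 120 for 128-byte blocks).
 */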
1748 | static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) |
1749 | { |
1750 | memset(bfr_ptr, 0, bs); |
1751 | *bfr_ptr = 0x80; |
1752 | if (bs == 64) |
1753 | *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); |
1754 | else |
1755 | *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); |
1756 | } |
1757 | |
1758 | static int chcr_ahash_final(struct ahash_request *req) |
1759 | { |
1760 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1761 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763 | struct hash_wr_param params; |
1764 | struct sk_buff *skb; |
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1768 | int error; |
1769 | unsigned int cpu; |
1770 | |
1771 | cpu = get_cpu(); |
1772 | req_ctx->txqidx = cpu % ctx->ntxq; |
1773 | req_ctx->rxqidx = cpu % ctx->nrxq; |
1774 | put_cpu(); |
1775 | |
1776 | error = chcr_inc_wrcount(dev); |
1777 | if (error) |
1778 | return -ENXIO; |
1779 | |
	chcr_init_hctx_per_wr(req_ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}
1795 | |
1796 | req_ctx->hctx_wr.result = 1; |
1797 | params.bfr_len = req_ctx->reqlen; |
1798 | req_ctx->data_len += params.bfr_len + params.sg_len; |
1799 | req_ctx->hctx_wr.srcsg = req->src; |
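	/* Nothing is buffered, so hand the hardware a pre-built padding
	 * block as an intermediate (more = 1) update rather than asking it
	 * to finalise a zero-length payload.
	 */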
1800 | if (req_ctx->reqlen == 0) { |
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1802 | params.last = 0; |
1803 | params.more = 1; |
1804 | params.scmd1 = 0; |
1805 | params.bfr_len = bs; |
1806 | |
1807 | } else { |
1808 | params.scmd1 = req_ctx->data_len; |
1809 | params.last = 1; |
1810 | params.more = 0; |
1811 | } |
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
1816 | goto err; |
1817 | } |
1818 | req_ctx->reqlen = 0; |
1819 | skb->dev = u_ctx->lldi.ports[0]; |
1820 | set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); |
1821 | chcr_send_wr(skb); |
1822 | return -EINPROGRESS; |
1823 | err: |
1824 | chcr_dec_wrcount(dev); |
1825 | return error; |
1826 | } |
1827 | |
1828 | static int chcr_ahash_finup(struct ahash_request *req) |
1829 | { |
1830 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1831 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
1835 | struct sk_buff *skb; |
1836 | struct hash_wr_param params; |
1837 | u8 bs; |
1838 | int error; |
1839 | unsigned int cpu; |
1840 | |
1841 | cpu = get_cpu(); |
1842 | req_ctx->txqidx = cpu % ctx->ntxq; |
1843 | req_ctx->rxqidx = cpu % ctx->nrxq; |
1844 | put_cpu(); |
1845 | |
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1847 | error = chcr_inc_wrcount(dev); |
1848 | if (error) |
1849 | return -ENXIO; |
1850 | |
1851 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1852 | req_ctx->txqidx) && |
1853 | (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { |
1854 | error = -ENOSPC; |
1855 | goto err; |
1856 | } |
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859 | if (error) { |
1860 | error = -ENOMEM; |
1861 | goto err; |
1862 | } |
1863 | |
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867 | params.kctx_len *= 2; |
1868 | params.opad_needed = 1; |
1869 | } else { |
1870 | params.opad_needed = 0; |
1871 | } |
1872 | |
1873 | params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, |
1874 | HASH_SPACE_LEFT(params.kctx_len), 0); |
1875 | if (params.sg_len < req->nbytes) { |
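		/* The source does not fit in one WR: send an intermediate
		 * (more = 1) update now and finish in a later request.
		 */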
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1877 | params.kctx_len /= 2; |
1878 | params.opad_needed = 0; |
1879 | } |
1880 | params.last = 0; |
1881 | params.more = 1; |
1882 | params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) |
1883 | - req_ctx->reqlen; |
1884 | params.hash_size = params.alg_prm.result_size; |
1885 | params.scmd1 = 0; |
1886 | } else { |
1887 | params.last = 1; |
1888 | params.more = 0; |
1889 | params.sg_len = req->nbytes; |
		params.hash_size = crypto_ahash_digestsize(rtfm);
1891 | params.scmd1 = req_ctx->data_len + req_ctx->reqlen + |
1892 | params.sg_len; |
1893 | } |
1894 | params.bfr_len = req_ctx->reqlen; |
1895 | req_ctx->data_len += params.bfr_len + params.sg_len; |
1896 | req_ctx->hctx_wr.result = 1; |
1897 | req_ctx->hctx_wr.srcsg = req->src; |
1898 | if ((req_ctx->reqlen + req->nbytes) == 0) { |
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1900 | params.last = 0; |
1901 | params.more = 1; |
1902 | params.scmd1 = 0; |
1903 | params.bfr_len = bs; |
1904 | } |
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
1908 | goto unmap; |
1909 | } |
1910 | req_ctx->reqlen = 0; |
1911 | req_ctx->hctx_wr.processed += params.sg_len; |
1912 | skb->dev = u_ctx->lldi.ports[0]; |
1913 | set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); |
1914 | chcr_send_wr(skb); |
1915 | return -EINPROGRESS; |
1916 | unmap: |
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1918 | err: |
1919 | chcr_dec_wrcount(dev); |
1920 | return error; |
1921 | } |
1922 | |
1923 | static int chcr_hmac_init(struct ahash_request *areq); |
1924 | static int chcr_sha_init(struct ahash_request *areq); |
1925 | |
1926 | static int chcr_ahash_digest(struct ahash_request *req) |
1927 | { |
1928 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
1929 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
1933 | struct sk_buff *skb; |
1934 | struct hash_wr_param params; |
1935 | u8 bs; |
1936 | int error; |
1937 | unsigned int cpu; |
1938 | |
1939 | cpu = get_cpu(); |
1940 | req_ctx->txqidx = cpu % ctx->ntxq; |
1941 | req_ctx->rxqidx = cpu % ctx->nrxq; |
1942 | put_cpu(); |
1943 | |
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		chcr_hmac_init(req);
	else
		chcr_sha_init(req);

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1950 | error = chcr_inc_wrcount(dev); |
1951 | if (error) |
1952 | return -ENXIO; |
1953 | |
1954 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
1955 | req_ctx->txqidx) && |
1956 | (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { |
1957 | error = -ENOSPC; |
1958 | goto err; |
1959 | } |
1960 | |
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1963 | if (error) { |
1964 | error = -ENOMEM; |
1965 | goto err; |
1966 | } |
1967 | |
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1971 | params.kctx_len *= 2; |
1972 | params.opad_needed = 1; |
1973 | } else { |
1974 | params.opad_needed = 0; |
1975 | } |
1976 | params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, |
1977 | HASH_SPACE_LEFT(params.kctx_len), 0); |
1978 | if (params.sg_len < req->nbytes) { |
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1980 | params.kctx_len /= 2; |
1981 | params.opad_needed = 0; |
1982 | } |
1983 | params.last = 0; |
1984 | params.more = 1; |
1985 | params.scmd1 = 0; |
1986 | params.sg_len = rounddown(params.sg_len, bs); |
1987 | params.hash_size = params.alg_prm.result_size; |
1988 | } else { |
1989 | params.sg_len = req->nbytes; |
		params.hash_size = crypto_ahash_digestsize(rtfm);
1991 | params.last = 1; |
1992 | params.more = 0; |
1993 | params.scmd1 = req->nbytes + req_ctx->data_len; |
1994 | |
1995 | } |
1996 | params.bfr_len = 0; |
1997 | req_ctx->hctx_wr.result = 1; |
1998 | req_ctx->hctx_wr.srcsg = req->src; |
1999 | req_ctx->data_len += params.bfr_len + params.sg_len; |
2000 | |
2001 | if (req->nbytes == 0) { |
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
2003 | params.more = 1; |
2004 | params.bfr_len = bs; |
2005 | } |
2006 | |
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
2010 | goto unmap; |
2011 | } |
2012 | req_ctx->hctx_wr.processed += params.sg_len; |
2013 | skb->dev = u_ctx->lldi.ports[0]; |
2014 | set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); |
2015 | chcr_send_wr(skb); |
2016 | return -EINPROGRESS; |
2017 | unmap: |
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2019 | err: |
2020 | chcr_dec_wrcount(dev); |
2021 | return error; |
2022 | } |
2023 | |
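/* Issue a follow-up work request for source data that did not fit in the
 * previous WR; invoked from the completion path until the whole request has
 * been consumed.
 */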
2024 | static int chcr_ahash_continue(struct ahash_request *req) |
2025 | { |
2026 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); |
2027 | struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; |
2028 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); |
	struct chcr_context *ctx = h_ctx(rtfm);
2030 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
2031 | struct sk_buff *skb; |
2032 | struct hash_wr_param params; |
2033 | u8 bs; |
2034 | int error; |
2035 | unsigned int cpu; |
2036 | |
2037 | cpu = get_cpu(); |
2038 | reqctx->txqidx = cpu % ctx->ntxq; |
2039 | reqctx->rxqidx = cpu % ctx->nrxq; |
2040 | put_cpu(); |
2041 | |
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2046 | params.kctx_len *= 2; |
2047 | params.opad_needed = 1; |
2048 | } else { |
2049 | params.opad_needed = 0; |
2050 | } |
2051 | params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0, |
2052 | HASH_SPACE_LEFT(params.kctx_len), |
2053 | hctx_wr->src_ofst); |
2054 | if ((params.sg_len + hctx_wr->processed) > req->nbytes) |
2055 | params.sg_len = req->nbytes - hctx_wr->processed; |
2056 | if (!hctx_wr->result || |
2057 | ((params.sg_len + hctx_wr->processed) < req->nbytes)) { |
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2059 | params.kctx_len /= 2; |
2060 | params.opad_needed = 0; |
2061 | } |
2062 | params.last = 0; |
2063 | params.more = 1; |
2064 | params.sg_len = rounddown(params.sg_len, bs); |
2065 | params.hash_size = params.alg_prm.result_size; |
2066 | params.scmd1 = 0; |
2067 | } else { |
2068 | params.last = 1; |
2069 | params.more = 0; |
		params.hash_size = crypto_ahash_digestsize(rtfm);
2071 | params.scmd1 = reqctx->data_len + params.sg_len; |
2072 | } |
2073 | params.bfr_len = 0; |
2074 | reqctx->data_len += params.sg_len; |
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
2078 | goto err; |
2079 | } |
2080 | hctx_wr->processed += params.sg_len; |
2081 | skb->dev = u_ctx->lldi.ports[0]; |
2082 | set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); |
2083 | chcr_send_wr(skb); |
2084 | return 0; |
2085 | err: |
2086 | return error; |
2087 | } |
2088 | |
2089 | static inline void chcr_handle_ahash_resp(struct ahash_request *req, |
2090 | unsigned char *input, |
2091 | int err) |
2092 | { |
2093 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); |
2094 | struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; |
2095 | int digestsize, updated_digestsize; |
2096 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2098 | struct chcr_dev *dev = h_ctx(tfm)->dev; |
2099 | |
2100 | if (input == NULL) |
2101 | goto out; |
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2103 | updated_digestsize = digestsize; |
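	/* SHA-224/384 are truncated variants, so the partial hash state the
	 * hardware returns is the full SHA-256/512 state size.
	 */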
2104 | if (digestsize == SHA224_DIGEST_SIZE) |
2105 | updated_digestsize = SHA256_DIGEST_SIZE; |
2106 | else if (digestsize == SHA384_DIGEST_SIZE) |
2107 | updated_digestsize = SHA512_DIGEST_SIZE; |
2108 | |
2109 | if (hctx_wr->dma_addr) { |
2110 | dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr, |
2111 | hctx_wr->dma_len, DMA_TO_DEVICE); |
2112 | hctx_wr->dma_addr = 0; |
2113 | } |
2114 | if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) == |
2115 | req->nbytes)) { |
2116 | if (hctx_wr->result == 1) { |
2117 | hctx_wr->result = 0; |
2118 | memcpy(req->result, input + sizeof(struct cpl_fw6_pld), |
2119 | digestsize); |
2120 | } else { |
2121 | memcpy(reqctx->partial_hash, |
2122 | input + sizeof(struct cpl_fw6_pld), |
2123 | updated_digestsize); |
2124 | |
2125 | } |
2126 | goto unmap; |
2127 | } |
2128 | memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), |
2129 | updated_digestsize); |
2130 | |
2131 | err = chcr_ahash_continue(req); |
2132 | if (err) |
2133 | goto unmap; |
2134 | return; |
2135 | unmap: |
2136 | if (hctx_wr->is_sg_map) |
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

2140 | out: |
2141 | chcr_dec_wrcount(dev); |
2142 | ahash_request_complete(req, err); |
2143 | } |
2144 | |
2145 | /* |
2146 | * chcr_handle_resp - Unmap the DMA buffers associated with the request |
2147 | * @req: crypto request |
2148 | */ |
2149 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, |
2150 | int err) |
2151 | { |
2152 | struct crypto_tfm *tfm = req->tfm; |
2153 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); |
	struct adapter *adap = padap(ctx->dev);
2155 | |
2156 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
2157 | case CRYPTO_ALG_TYPE_AEAD: |
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2159 | break; |
2160 | |
2161 | case CRYPTO_ALG_TYPE_SKCIPHER: |
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
2164 | break; |
2165 | case CRYPTO_ALG_TYPE_AHASH: |
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2167 | } |
	atomic_inc(&adap->chcr_stats.complete);
2169 | return err; |
2170 | } |

static int chcr_ahash_export(struct ahash_request *areq, void *out)
2172 | { |
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2174 | struct chcr_ahash_req_ctx *state = out; |
2175 | |
2176 | state->reqlen = req_ctx->reqlen; |
2177 | state->data_len = req_ctx->data_len; |
2178 | memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); |
2179 | memcpy(state->partial_hash, req_ctx->partial_hash, |
2180 | CHCR_HASH_MAX_DIGEST_SIZE); |
	chcr_init_hctx_per_wr(state);
2182 | return 0; |
2183 | } |
2184 | |
2185 | static int chcr_ahash_import(struct ahash_request *areq, const void *in) |
2186 | { |
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2188 | struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; |
2189 | |
2190 | req_ctx->reqlen = state->reqlen; |
2191 | req_ctx->data_len = state->data_len; |
2192 | req_ctx->reqbfr = req_ctx->bfr1; |
2193 | req_ctx->skbfr = req_ctx->bfr2; |
2194 | memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); |
2195 | memcpy(req_ctx->partial_hash, state->partial_hash, |
2196 | CHCR_HASH_MAX_DIGEST_SIZE); |
	chcr_init_hctx_per_wr(req_ctx);
2198 | return 0; |
2199 | } |
2200 | |
2201 | static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
2202 | unsigned int keylen) |
2203 | { |
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2207 | unsigned int i, err = 0, updated_digestsize; |
2208 | |
2209 | SHASH_DESC_ON_STACK(shash, hmacctx->base_hash); |
2210 | |
	/* Use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data, and opad with the final hash result.
	 * ipad is kept in hmacctx->ipad and opad in hmacctx->opad.
	 */
2215 | shash->tfm = hmacctx->base_hash; |
2216 | if (keylen > bs) { |
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
2219 | if (err) |
2220 | goto out; |
2221 | keylen = digestsize; |
2222 | } else { |
2223 | memcpy(hmacctx->ipad, key, keylen); |
2224 | } |
2225 | memset(hmacctx->ipad + keylen, 0, bs - keylen); |
	unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
		      "fortified memcpy causes -Wrestrict warning");
2228 | |
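	/* XOR in the standard HMAC (RFC 2104) pad bytes: 0x36 repeated for
	 * ipad and 0x5c repeated for opad.
	 */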
2229 | for (i = 0; i < bs / sizeof(int); i++) { |
2230 | *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; |
2231 | *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; |
2232 | } |
2233 | |
2234 | updated_digestsize = digestsize; |
2235 | if (digestsize == SHA224_DIGEST_SIZE) |
2236 | updated_digestsize = SHA256_DIGEST_SIZE; |
2237 | else if (digestsize == SHA384_DIGEST_SIZE) |
2238 | updated_digestsize = SHA512_DIGEST_SIZE; |
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
2241 | if (err) |
2242 | goto out; |
	chcr_change_order(hmacctx->ipad, updated_digestsize);
2244 | |
	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
2247 | if (err) |
2248 | goto out; |
	chcr_change_order(hmacctx->opad, updated_digestsize);
2250 | out: |
2251 | return err; |
2252 | } |
2253 | |
2254 | static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, |
2255 | unsigned int key_len) |
2256 | { |
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2258 | unsigned short context_size = 0; |
2259 | int err; |
2260 | |
	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2262 | if (err) |
2263 | goto badkey_err; |
2264 | |
2265 | memcpy(ablkctx->key, key, key_len); |
2266 | ablkctx->enckey_len = key_len; |
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2268 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; |
	/* Both keys for XTS must be aligned to a 16-byte boundary by padding
	 * with zeros, so a 24-byte key gets 8 bytes of zero padding.
	 */
2272 | if (key_len == 48) { |
2273 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len |
2274 | + 16) >> 4; |
2275 | memmove(ablkctx->key + 32, ablkctx->key + 24, 24); |
2276 | memset(ablkctx->key + 24, 0, 8); |
2277 | memset(ablkctx->key + 56, 0, 8); |
2278 | ablkctx->enckey_len = 64; |
2279 | ablkctx->key_ctx_hdr = |
2280 | FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192, |
2281 | CHCR_KEYCTX_NO_KEY, 1, |
2282 | 0, context_size); |
2283 | } else { |
2284 | ablkctx->key_ctx_hdr = |
2285 | FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? |
2286 | CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : |
2287 | CHCR_KEYCTX_CIPHER_KEY_SIZE_256, |
2288 | CHCR_KEYCTX_NO_KEY, 1, |
2289 | 0, context_size); |
2290 | } |
2291 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; |
2292 | return 0; |
2293 | badkey_err: |
2294 | ablkctx->enckey_len = 0; |
2295 | |
2296 | return err; |
2297 | } |
2298 | |
2299 | static int chcr_sha_init(struct ahash_request *areq) |
2300 | { |
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2303 | int digestsize = crypto_ahash_digestsize(tfm); |
2304 | |
2305 | req_ctx->data_len = 0; |
2306 | req_ctx->reqlen = 0; |
2307 | req_ctx->reqbfr = req_ctx->bfr1; |
2308 | req_ctx->skbfr = req_ctx->bfr2; |
	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2310 | |
2311 | return 0; |
2312 | } |
2313 | |
2314 | static int chcr_sha_cra_init(struct crypto_tfm *tfm) |
2315 | { |
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
2319 | } |
2320 | |
2321 | static int chcr_hmac_init(struct ahash_request *areq) |
2322 | { |
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2328 | |
2329 | chcr_sha_init(areq); |
2330 | req_ctx->data_len = bs; |
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2332 | if (digestsize == SHA224_DIGEST_SIZE) |
2333 | memcpy(req_ctx->partial_hash, hmacctx->ipad, |
2334 | SHA256_DIGEST_SIZE); |
2335 | else if (digestsize == SHA384_DIGEST_SIZE) |
2336 | memcpy(req_ctx->partial_hash, hmacctx->ipad, |
2337 | SHA512_DIGEST_SIZE); |
2338 | else |
2339 | memcpy(req_ctx->partial_hash, hmacctx->ipad, |
2340 | digestsize); |
2341 | } |
2342 | return 0; |
2343 | } |
2344 | |
2345 | static int chcr_hmac_cra_init(struct crypto_tfm *tfm) |
2346 | { |
2347 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); |
2348 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); |
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
2358 | } |
2359 | |
2360 | static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) |
2361 | { |
2362 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); |
2363 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); |
2364 | |
2365 | if (hmacctx->base_hash) { |
		chcr_free_shash(hmacctx->base_hash);
2367 | hmacctx->base_hash = NULL; |
2368 | } |
2369 | } |
2370 | |
2371 | inline void chcr_aead_common_exit(struct aead_request *req) |
2372 | { |
2373 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2374 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2376 | |
	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2378 | } |
2379 | |
2380 | static int chcr_aead_common_init(struct aead_request *req) |
2381 | { |
2382 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2384 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2385 | unsigned int authsize = crypto_aead_authsize(tfm); |
2386 | int error = -EINVAL; |
2387 | |
2388 | /* validate key size */ |
2389 | if (aeadctx->enckey_len == 0) |
2390 | goto err; |
2391 | if (reqctx->op && req->cryptlen < authsize) |
2392 | goto err; |
2393 | if (reqctx->b0_len) |
2394 | reqctx->scratch_pad = reqctx->iv + IV; |
2395 | else |
2396 | reqctx->scratch_pad = NULL; |
2397 | |
	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
2400 | if (error) { |
2401 | error = -ENOMEM; |
2402 | goto err; |
2403 | } |
2404 | |
2405 | return 0; |
2406 | err: |
2407 | return error; |
2408 | } |
2409 | |
2410 | static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents, |
2411 | int aadmax, int wrlen, |
2412 | unsigned short op_type) |
2413 | { |
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2415 | |
2416 | if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || |
2417 | dst_nents > MAX_DSGL_ENT || |
2418 | (req->assoclen > aadmax) || |
2419 | (wrlen > SGE_MAX_WR_LEN)) |
2420 | return 1; |
2421 | return 0; |
2422 | } |
2423 | |
2424 | static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) |
2425 | { |
2426 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx_dma(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
	       crypto_aead_encrypt(subreq);
2438 | } |
2439 | |
2440 | static struct sk_buff *create_authenc_wr(struct aead_request *req, |
2441 | unsigned short qid, |
2442 | int size) |
2443 | { |
2444 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2445 | struct chcr_context *ctx = a_ctx(tfm); |
2446 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
2447 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2449 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2450 | struct sk_buff *skb = NULL; |
2451 | struct chcr_wr *chcr_req; |
2452 | struct cpl_rx_phys_dsgl *phys_cpl; |
2453 | struct ulptx_sgl *ulptx; |
2454 | unsigned int transhdr_len; |
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2456 | unsigned int kctx_len = 0, dnents, snents; |
2457 | unsigned int authsize = crypto_aead_authsize(tfm); |
2458 | int error = -EINVAL; |
2459 | u8 *ivptr; |
2460 | int null = 0; |
2461 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
2462 | GFP_ATOMIC; |
	struct adapter *adap = padap(ctx->dev);
2464 | unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
2465 | |
2466 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
2467 | if (req->cryptlen == 0) |
2468 | return NULL; |
2469 | |
2470 | reqctx->b0_len = 0; |
2471 | error = chcr_aead_common_init(req); |
2472 | if (error) |
2473 | return ERR_PTR(error); |
2474 | |
2475 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || |
2476 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { |
2477 | null = 1; |
2478 | } |
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
			       (reqctx->op ? -authsize : authsize),
			       CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG; // For IV
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dst_size = get_space_for_phys_dsgl(dnents);
2485 | kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4) |
2486 | - sizeof(chcr_req->key_ctx); |
2487 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
2488 | reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) < |
2489 | SGE_MAX_WR_LEN; |
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
		: (sgl_len(snents) * 8);
2492 | transhdr_len += temp; |
2493 | transhdr_len = roundup(transhdr_len, 16); |
2494 | |
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
2502 | if (!skb) { |
2503 | error = -ENOMEM; |
2504 | goto err; |
2505 | } |
2506 | |
	chcr_req = __skb_put_zero(skb, transhdr_len);
2508 | |
2509 | temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; |
2510 | |
2511 | /* |
2512 | * Input order is AAD,IV and Payload. where IV should be included as |
2513 | * the part of authdata. All other fields should be filled according |
2514 | * to the hardware spec |
2515 | */ |
2516 | chcr_req->sec_cpl.op_ivinsrtofst = |
2517 | FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); |
2518 | chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen); |
2519 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
2520 | null ? 0 : 1 + IV, |
2521 | null ? 0 : IV + req->assoclen, |
2522 | req->assoclen + IV + 1, |
2523 | (temp & 0x1F0) >> 4); |
2524 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( |
2525 | temp & 0xF, |
2526 | null ? 0 : req->assoclen + IV + 1, |
2527 | temp, temp); |
2528 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || |
2529 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) |
2530 | temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; |
2531 | else |
2532 | temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; |
2533 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, |
2534 | (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0, |
2535 | temp, |
2536 | actx->auth_mode, aeadctx->hmac_ctrl, |
2537 | IV >> 1); |
2538 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
2539 | 0, 0, dst_size); |
2540 | |
2541 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
2542 | if (reqctx->op == CHCR_ENCRYPT_OP || |
2543 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
2544 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) |
2545 | memcpy(chcr_req->key_ctx.key, aeadctx->key, |
2546 | aeadctx->enckey_len); |
2547 | else |
2548 | memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, |
2549 | aeadctx->enckey_len); |
2550 | |
2551 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
2552 | actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); |
2553 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2554 | ivptr = (u8 *)(phys_cpl + 1) + dst_size; |
2555 | ulptx = (struct ulptx_sgl *)(ivptr + IV); |
2556 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
2557 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { |
2558 | memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); |
2559 | memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv, |
2560 | CTR_RFC3686_IV_SIZE); |
2561 | *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE + |
2562 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); |
2563 | } else { |
2564 | memcpy(ivptr, req->iv, IV); |
2565 | } |
2566 | chcr_add_aead_dst_ent(req, phys_cpl, qid); |
2567 | chcr_add_aead_src_ent(req, ulptx); |
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);
2573 | reqctx->skb = skb; |
2574 | |
2575 | return skb; |
2576 | err: |
2577 | chcr_aead_common_exit(req); |
2578 | |
2579 | return ERR_PTR(error); |
2580 | } |
2581 | |
2582 | int chcr_aead_dma_map(struct device *dev, |
2583 | struct aead_request *req, |
2584 | unsigned short op_type) |
2585 | { |
2586 | int error; |
2587 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2588 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2589 | unsigned int authsize = crypto_aead_authsize(tfm); |
2590 | int src_len, dst_len; |
2591 | |
	/* Calculate and handle src and dst sg lengths separately
	 * for in-place and out-of-place operations.
	 */
2595 | if (req->src == req->dst) { |
2596 | src_len = req->assoclen + req->cryptlen + (op_type ? |
2597 | 0 : authsize); |
2598 | dst_len = src_len; |
2599 | } else { |
2600 | src_len = req->assoclen + req->cryptlen; |
2601 | dst_len = req->assoclen + req->cryptlen + (op_type ? |
2602 | -authsize : authsize); |
2603 | } |
2604 | |
2605 | if (!req->cryptlen || !src_len || !dst_len) |
2606 | return 0; |
2607 | reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), |
2608 | DMA_BIDIRECTIONAL); |
	if (dma_mapping_error(dev, reqctx->iv_dma))
2610 | return -ENOMEM; |
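	/* b0 (the CCM formatting block) sits directly after the IV in the
	 * same buffer, so it shares the IV's DMA mapping.
	 */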
2611 | if (reqctx->b0_len) |
2612 | reqctx->b0_dma = reqctx->iv_dma + IV; |
2613 | else |
2614 | reqctx->b0_dma = 0; |
2615 | if (req->src == req->dst) { |
2616 | error = dma_map_sg(dev, req->src, |
2617 | sg_nents_for_len(req->src, src_len), |
2618 | DMA_BIDIRECTIONAL); |
2619 | if (!error) |
2620 | goto err; |
2621 | } else { |
2622 | error = dma_map_sg(dev, req->src, |
2623 | sg_nents_for_len(req->src, src_len), |
2624 | DMA_TO_DEVICE); |
2625 | if (!error) |
2626 | goto err; |
2627 | error = dma_map_sg(dev, req->dst, |
2628 | sg_nents_for_len(req->dst, dst_len), |
2629 | DMA_FROM_DEVICE); |
2630 | if (!error) { |
2631 | dma_unmap_sg(dev, req->src, |
2632 | sg_nents_for_len(req->src, src_len), |
2633 | DMA_TO_DEVICE); |
2634 | goto err; |
2635 | } |
2636 | } |
2637 | |
2638 | return 0; |
2639 | err: |
2640 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); |
2641 | return -ENOMEM; |
2642 | } |
2643 | |
2644 | void chcr_aead_dma_unmap(struct device *dev, |
2645 | struct aead_request *req, |
2646 | unsigned short op_type) |
2647 | { |
2648 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2649 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2650 | unsigned int authsize = crypto_aead_authsize(tfm); |
2651 | int src_len, dst_len; |
2652 | |
	/* Calculate and handle src and dst sg lengths separately
	 * for in-place and out-of-place operations.
	 */
2656 | if (req->src == req->dst) { |
2657 | src_len = req->assoclen + req->cryptlen + (op_type ? |
2658 | 0 : authsize); |
2659 | dst_len = src_len; |
2660 | } else { |
2661 | src_len = req->assoclen + req->cryptlen; |
2662 | dst_len = req->assoclen + req->cryptlen + (op_type ? |
2663 | -authsize : authsize); |
2664 | } |
2665 | |
2666 | if (!req->cryptlen || !src_len || !dst_len) |
2667 | return; |
2668 | |
2669 | dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), |
2670 | DMA_BIDIRECTIONAL); |
2671 | if (req->src == req->dst) { |
2672 | dma_unmap_sg(dev, req->src, |
2673 | sg_nents_for_len(req->src, src_len), |
2674 | DMA_BIDIRECTIONAL); |
2675 | } else { |
2676 | dma_unmap_sg(dev, req->src, |
2677 | sg_nents_for_len(req->src, src_len), |
2678 | DMA_TO_DEVICE); |
2679 | dma_unmap_sg(dev, req->dst, |
2680 | sg_nents_for_len(req->dst, dst_len), |
2681 | DMA_FROM_DEVICE); |
2682 | } |
2683 | } |
2684 | |
2685 | void chcr_add_aead_src_ent(struct aead_request *req, |
2686 | struct ulptx_sgl *ulptx) |
2687 | { |
2688 | struct ulptx_walk ulp_walk; |
2689 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2690 | |
2691 | if (reqctx->imm) { |
2692 | u8 *buf = (u8 *)ulptx; |
2693 | |
2694 | if (reqctx->b0_len) { |
2695 | memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); |
2696 | buf += reqctx->b0_len; |
2697 | } |
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen + req->assoclen, 0);
2700 | } else { |
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
				  req->assoclen, 0);
		ulptx_walk_end(&ulp_walk);
2708 | } |
2709 | } |
2710 | |
2711 | void chcr_add_aead_dst_ent(struct aead_request *req, |
2712 | struct cpl_rx_phys_dsgl *phys_cpl, |
2713 | unsigned short qid) |
2714 | { |
2715 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2716 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2717 | struct dsgl_walk dsgl_walk; |
2718 | unsigned int authsize = crypto_aead_authsize(tfm); |
2719 | struct chcr_context *ctx = a_ctx(tfm); |
2720 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
2721 | u32 temp; |
2722 | unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
2723 | |
2724 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2731 | } |
2732 | |
2733 | void chcr_add_cipher_src_ent(struct skcipher_request *req, |
2734 | void *ulptx, |
2735 | struct cipher_wr_param *wrparam) |
2736 | { |
2737 | struct ulptx_walk ulp_walk; |
2738 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
2739 | u8 *buf = ulptx; |
2740 | |
2741 | memcpy(buf, reqctx->iv, IV); |
2742 | buf += IV; |
2743 | if (reqctx->imm) { |
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
2753 | } |
2754 | } |
2755 | |
2756 | void chcr_add_cipher_dst_ent(struct skcipher_request *req, |
2757 | struct cpl_rx_phys_dsgl *phys_cpl, |
2758 | struct cipher_wr_param *wrparam, |
2759 | unsigned short qid) |
2760 | { |
2761 | struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); |
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2763 | struct chcr_context *ctx = c_ctx(tfm); |
2764 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
2765 | struct dsgl_walk dsgl_walk; |
2766 | unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
2767 | |
2768 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2775 | } |
2776 | |
2777 | void chcr_add_hash_src_ent(struct ahash_request *req, |
2778 | struct ulptx_sgl *ulptx, |
2779 | struct hash_wr_param *param) |
2780 | { |
2781 | struct ulptx_walk ulp_walk; |
2782 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); |
2783 | |
2784 | if (reqctx->hctx_wr.imm) { |
2785 | u8 *buf = (u8 *)ulptx; |
2786 | |
2787 | if (param->bfr_len) { |
2788 | memcpy(buf, reqctx->reqbfr, param->bfr_len); |
2789 | buf += param->bfr_len; |
2790 | } |
2791 | |
		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
2795 | } else { |
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
2805 | } |
2806 | } |
2807 | |
2808 | int chcr_hash_dma_map(struct device *dev, |
2809 | struct ahash_request *req) |
2810 | { |
2811 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
2812 | int error = 0; |
2813 | |
2814 | if (!req->nbytes) |
2815 | return 0; |
2816 | error = dma_map_sg(dev, req->src, sg_nents(req->src), |
2817 | DMA_TO_DEVICE); |
2818 | if (!error) |
2819 | return -ENOMEM; |
2820 | req_ctx->hctx_wr.is_sg_map = 1; |
2821 | return 0; |
2822 | } |
2823 | |
2824 | void chcr_hash_dma_unmap(struct device *dev, |
2825 | struct ahash_request *req) |
2826 | { |
2827 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
2828 | |
2829 | if (!req->nbytes) |
2830 | return; |
2831 | |
2832 | dma_unmap_sg(dev, req->src, sg_nents(req->src), |
2833 | DMA_TO_DEVICE); |
	req_ctx->hctx_wr.is_sg_map = 0;
}
2837 | |
2838 | int chcr_cipher_dma_map(struct device *dev, |
2839 | struct skcipher_request *req) |
2840 | { |
2841 | int error; |
2842 | |
2843 | if (req->src == req->dst) { |
2844 | error = dma_map_sg(dev, req->src, sg_nents(req->src), |
2845 | DMA_BIDIRECTIONAL); |
2846 | if (!error) |
2847 | goto err; |
2848 | } else { |
2849 | error = dma_map_sg(dev, req->src, sg_nents(req->src), |
2850 | DMA_TO_DEVICE); |
2851 | if (!error) |
2852 | goto err; |
2853 | error = dma_map_sg(dev, req->dst, sg_nents(req->dst), |
2854 | DMA_FROM_DEVICE); |
2855 | if (!error) { |
2856 | dma_unmap_sg(dev, req->src, sg_nents(req->src), |
2857 | DMA_TO_DEVICE); |
2858 | goto err; |
2859 | } |
2860 | } |
2861 | |
2862 | return 0; |
2863 | err: |
2864 | return -ENOMEM; |
2865 | } |
2866 | |
2867 | void chcr_cipher_dma_unmap(struct device *dev, |
2868 | struct skcipher_request *req) |
2869 | { |
2870 | if (req->src == req->dst) { |
2871 | dma_unmap_sg(dev, req->src, sg_nents(req->src), |
2872 | DMA_BIDIRECTIONAL); |
2873 | } else { |
2874 | dma_unmap_sg(dev, req->src, sg_nents(req->src), |
2875 | DMA_TO_DEVICE); |
2876 | dma_unmap_sg(dev, req->dst, sg_nents(req->dst), |
2877 | DMA_FROM_DEVICE); |
2878 | } |
2879 | } |
2880 | |
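/* Encode the CCM message length big-endian into the trailing csize bytes of
 * the B0 block, as laid out in RFC 3610.
 */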
2881 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
2882 | { |
2883 | __be32 data; |
2884 | |
2885 | memset(block, 0, csize); |
2886 | block += csize; |
2887 | |
2888 | if (csize >= 4) |
2889 | csize = 4; |
2890 | else if (msglen > (unsigned int)(1 << (8 * csize))) |
2891 | return -EOVERFLOW; |
2892 | |
2893 | data = cpu_to_be32(msglen); |
2894 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); |
2895 | |
2896 | return 0; |
2897 | } |
2898 | |
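/* Assemble the CCM B0 block: flags byte, nonce, and the message length in
 * the final L bytes, with L taken from the L' field in iv[0].
 */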
2899 | static int generate_b0(struct aead_request *req, u8 *ivptr, |
2900 | unsigned short op_type) |
2901 | { |
2902 | unsigned int l, lp, m; |
2903 | int rc; |
2904 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2905 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2906 | u8 *b0 = reqctx->scratch_pad; |
2907 | |
	m = crypto_aead_authsize(aead);
2909 | |
2910 | memcpy(b0, ivptr, 16); |
2911 | |
2912 | lp = b0[0]; |
2913 | l = lp + 1; |
2914 | |
2915 | /* set m, bits 3-5 */ |
2916 | *b0 |= (8 * ((m - 2) / 2)); |
2917 | |
2918 | /* set adata, bit 6, if associated data is used */ |
2919 | if (req->assoclen) |
2920 | *b0 |= 64; |
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);
2924 | |
2925 | return rc; |
2926 | } |
2927 | |
2928 | static inline int crypto_ccm_check_iv(const u8 *iv) |
2929 | { |
2930 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ |
2931 | if (iv[0] < 1 || iv[0] > 7) |
2932 | return -EINVAL; |
2933 | |
2934 | return 0; |
2935 | } |
2936 | |
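/* Lay out the 16-byte CCM IV/counter block. For RFC 4309 the flags byte is
 * fixed at 3 (L' = 3), followed by the 3-byte salt from setkey and the
 * 8-byte IV from the request; plain CCM takes the caller's IV verbatim.
 */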
2937 | static int ccm_format_packet(struct aead_request *req, |
2938 | u8 *ivptr, |
2939 | unsigned int sub_type, |
2940 | unsigned short op_type, |
2941 | unsigned int assoclen) |
2942 | { |
2943 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2944 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2946 | int rc = 0; |
2947 | |
2948 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { |
2949 | ivptr[0] = 3; |
2950 | memcpy(ivptr + 1, &aeadctx->salt[0], 3); |
2951 | memcpy(ivptr + 4, req->iv, 8); |
2952 | memset(ivptr + 12, 0, 4); |
2953 | } else { |
2954 | memcpy(ivptr, req->iv, 16); |
2955 | } |
2956 | if (assoclen) |
		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2958 | |
2959 | rc = generate_b0(req, ivptr, op_type); |
2960 | /* zero the ctr value */ |
2961 | memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1); |
2962 | return rc; |
2963 | } |
2964 | |
2965 | static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, |
2966 | unsigned int dst_size, |
2967 | struct aead_request *req, |
2968 | unsigned short op_type) |
2969 | { |
2970 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2971 | struct chcr_context *ctx = a_ctx(tfm); |
2972 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
2973 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
2974 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
2975 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; |
2976 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; |
2977 | unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
2978 | unsigned int ccm_xtra; |
2979 | unsigned int tag_offset = 0, auth_offset = 0; |
2980 | unsigned int assoclen; |
2981 | |
2982 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
2983 | |
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2985 | assoclen = req->assoclen - 8; |
2986 | else |
2987 | assoclen = req->assoclen; |
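	/* Extra bytes consumed by CCM formatting: the B0 block plus, when
	 * AAD is present, the encoded AAD length field.
	 */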
2988 | ccm_xtra = CCM_B0_SIZE + |
2989 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); |
2990 | |
2991 | auth_offset = req->cryptlen ? |
2992 | (req->assoclen + IV + 1 + ccm_xtra) : 0; |
2993 | if (op_type == CHCR_DECRYPT_OP) { |
2994 | if (crypto_aead_authsize(tfm) != req->cryptlen) |
2995 | tag_offset = crypto_aead_authsize(tfm); |
2996 | else |
2997 | auth_offset = 0; |
2998 | } |
2999 | |
3000 | sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); |
3001 | sec_cpl->pldlen = |
3002 | htonl(req->assoclen + IV + req->cryptlen + ccm_xtra); |
	/* For CCM there will be b0 always. So AAD start will be 1 always */
3004 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
3005 | 1 + IV, IV + assoclen + ccm_xtra, |
3006 | req->assoclen + IV + 1 + ccm_xtra, 0); |
3007 | |
3008 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, |
3009 | auth_offset, tag_offset, |
3010 | (op_type == CHCR_ENCRYPT_OP) ? 0 : |
3011 | crypto_aead_authsize(tfm)); |
3012 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, |
3013 | (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, |
3014 | cipher_mode, mac_mode, |
3015 | aeadctx->hmac_ctrl, IV >> 1); |
3016 | |
3017 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, |
3018 | 0, dst_size); |
3019 | } |
3020 | |
3021 | static int aead_ccm_validate_input(unsigned short op_type, |
3022 | struct aead_request *req, |
3023 | struct chcr_aead_ctx *aeadctx, |
3024 | unsigned int sub_type) |
3025 | { |
3026 | if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { |
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
3029 | return -EINVAL; |
3030 | } |
3031 | } else { |
3032 | if (req->assoclen != 16 && req->assoclen != 20) { |
3033 | pr_err("RFC4309: Invalid AAD length %d\n" , |
3034 | req->assoclen); |
3035 | return -EINVAL; |
3036 | } |
3037 | } |
3038 | return 0; |
3039 | } |
3040 | |
3041 | static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, |
3042 | unsigned short qid, |
3043 | int size) |
3044 | { |
3045 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3047 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
3048 | struct sk_buff *skb = NULL; |
3049 | struct chcr_wr *chcr_req; |
3050 | struct cpl_rx_phys_dsgl *phys_cpl; |
3051 | struct ulptx_sgl *ulptx; |
3052 | unsigned int transhdr_len; |
3053 | unsigned int dst_size = 0, kctx_len, dnents, temp, snents; |
3054 | unsigned int sub_type, assoclen = req->assoclen; |
3055 | unsigned int authsize = crypto_aead_authsize(tfm); |
3056 | int error = -EINVAL; |
3057 | u8 *ivptr; |
3058 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
3059 | GFP_ATOMIC; |
	struct adapter *adap = padap(a_ctx(tfm)->dev);
3061 | |
	sub_type = get_aead_subtype(tfm);
3063 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) |
3064 | assoclen -= 8; |
3065 | reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); |
3066 | error = chcr_aead_common_init(req); |
3067 | if (error) |
3068 | return ERR_PTR(error); |
3069 | |
	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3071 | if (error) |
3072 | goto err; |
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
			       + (reqctx->op ? -authsize : authsize),
			       CHCR_DST_SG_SIZE, 0);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; // For B0
3081 | kctx_len = roundup(aeadctx->enckey_len, 16) * 2; |
3082 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
3083 | reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen + |
3084 | reqctx->b0_len) <= SGE_MAX_WR_LEN; |
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(snents) * 8);
3088 | transhdr_len += temp; |
3089 | transhdr_len = roundup(transhdr_len, 16); |
3090 | |
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
3098 | |
3099 | if (!skb) { |
3100 | error = -ENOMEM; |
3101 | goto err; |
3102 | } |
3103 | |
	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3107 | |
3108 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
3109 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); |
3110 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
3111 | aeadctx->key, aeadctx->enckey_len); |
3112 | |
3113 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
3114 | ivptr = (u8 *)(phys_cpl + 1) + dst_size; |
3115 | ulptx = (struct ulptx_sgl *)(ivptr + IV); |
	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3117 | if (error) |
3118 | goto dstmap_fail; |
3119 | chcr_add_aead_dst_ent(req, phys_cpl, qid); |
3120 | chcr_add_aead_src_ent(req, ulptx); |
3121 | |
	atomic_inc(&adap->chcr_stats.aead_rqst);
3123 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + |
3124 | kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen + |
3125 | reqctx->b0_len) : 0); |
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
3128 | reqctx->skb = skb; |
3129 | |
3130 | return skb; |
3131 | dstmap_fail: |
3132 | kfree_skb(skb); |
3133 | err: |
3134 | chcr_aead_common_exit(req); |
3135 | return ERR_PTR(error); |
3136 | } |
3137 | |
3138 | static struct sk_buff *create_gcm_wr(struct aead_request *req, |
3139 | unsigned short qid, |
3140 | int size) |
3141 | { |
3142 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3143 | struct chcr_context *ctx = a_ctx(tfm); |
3144 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
3145 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
3146 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
3147 | struct sk_buff *skb = NULL; |
3148 | struct chcr_wr *chcr_req; |
3149 | struct cpl_rx_phys_dsgl *phys_cpl; |
3150 | struct ulptx_sgl *ulptx; |
3151 | unsigned int transhdr_len, dnents = 0, snents; |
3152 | unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; |
3153 | unsigned int authsize = crypto_aead_authsize(tfm); |
3154 | int error = -EINVAL; |
3155 | u8 *ivptr; |
3156 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
3157 | GFP_ATOMIC; |
	struct adapter *adap = padap(ctx->dev);
3159 | unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; |
3160 | |
3161 | rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); |
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3163 | assoclen = req->assoclen - 8; |
3164 | |
3165 | reqctx->b0_len = 0; |
3166 | error = chcr_aead_common_init(req); |
3167 | if (error) |
3168 | return ERR_PTR(error); |
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
			       (reqctx->op ? -authsize : authsize),
			       CHCR_DST_SG_SIZE, 0);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
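	/* The GCM key context holds the AES key followed by the
	 * AEAD_H_SIZE-byte GHASH key H computed at setkey time.
	 */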
3176 | kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE; |
3177 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
3178 | reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <= |
3179 | SGE_MAX_WR_LEN; |
3180 | temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) : |
			(sgl_len(snents) * 8);
3182 | transhdr_len += temp; |
3183 | transhdr_len = roundup(transhdr_len, 16); |
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {

		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
3192 | if (!skb) { |
3193 | error = -ENOMEM; |
3194 | goto err; |
3195 | } |
3196 | |
	chcr_req = __skb_put_zero(skb, transhdr_len);

	// Offset of tag from end
3200 | temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; |
3201 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( |
3202 | rx_channel_id, 2, 1); |
3203 | chcr_req->sec_cpl.pldlen = |
3204 | htonl(req->assoclen + IV + req->cryptlen); |
3205 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
3206 | assoclen ? 1 + IV : 0, |
3207 | assoclen ? IV + assoclen : 0, |
3208 | req->assoclen + IV + 1, 0); |
3209 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
3210 | FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1, |
3211 | temp, temp); |
3212 | chcr_req->sec_cpl.seqno_numivs = |
3213 | FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == |
3214 | CHCR_ENCRYPT_OP) ? 1 : 0, |
3215 | CHCR_SCMD_CIPHER_MODE_AES_GCM, |
3216 | CHCR_SCMD_AUTH_MODE_GHASH, |
3217 | aeadctx->hmac_ctrl, IV >> 1); |
3218 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
3219 | 0, 0, dst_size); |
3220 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
3221 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); |
3222 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
3223 | GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); |
3224 | |
3225 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
3226 | ivptr = (u8 *)(phys_cpl + 1) + dst_size; |
3227 | /* prepare a 16 byte iv */ |
3228 | /* S A L T | IV | 0x00000001 */ |
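	/*
	 * With a 96-bit IV, GCM's initial counter block is
	 * J0 = IV || 0x00000001. RFC 4106 forms the 96-bit IV from the
	 * 4-byte salt carried in the key followed by the 8-byte
	 * per-request IV.
	 */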
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3231 | memcpy(ivptr, aeadctx->salt, 4); |
3232 | memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE); |
3233 | } else { |
3234 | memcpy(ivptr, req->iv, GCM_AES_IV_SIZE); |
3235 | } |
	put_unaligned_be32(0x01, &ivptr[12]);
3237 | ulptx = (struct ulptx_sgl *)(ivptr + 16); |
3238 | |
3239 | chcr_add_aead_dst_ent(req, phys_cpl, qid); |
3240 | chcr_add_aead_src_ent(req, ulptx); |
	atomic_inc(&adap->chcr_stats.aead_rqst);
3242 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + |
3243 | kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0); |
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
3246 | reqctx->skb = skb; |
3247 | return skb; |
3248 | |
3249 | err: |
3250 | chcr_aead_common_exit(req); |
3251 | return ERR_PTR(error); |
3252 | } |
3253 | |
3254 | |
3255 | |
3256 | static int chcr_aead_cra_init(struct crypto_aead *tfm) |
3257 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3259 | struct aead_alg *alg = crypto_aead_alg(tfm); |
3260 | |
	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3262 | CRYPTO_ALG_NEED_FALLBACK | |
3263 | CRYPTO_ALG_ASYNC); |
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize_dma(
		tfm, max(sizeof(struct chcr_aead_reqctx),
			 sizeof(struct aead_request) +
			 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
3271 | } |
3272 | |
3273 | static void chcr_aead_cra_exit(struct crypto_aead *tfm) |
3274 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3276 | |
	crypto_free_aead(aeadctx->sw_cipher);
3278 | } |
3279 | |
3280 | static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, |
3281 | unsigned int authsize) |
3282 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3284 | |
3285 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; |
3286 | aeadctx->mayverify = VERIFY_HW; |
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

3289 | static int chcr_authenc_setauthsize(struct crypto_aead *tfm, |
3290 | unsigned int authsize) |
3291 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);
3294 | |
	/* For SHA1 the IPsec authsize is 12, not maxauthsize / 2 (10), so
	 * the authsize == ICV_12 check must come before the
	 * authsize == (maxauth >> 1) check.
	 */
3299 | if (authsize == ICV_4) { |
3300 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; |
3301 | aeadctx->mayverify = VERIFY_HW; |
3302 | } else if (authsize == ICV_6) { |
3303 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; |
3304 | aeadctx->mayverify = VERIFY_HW; |
3305 | } else if (authsize == ICV_10) { |
3306 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; |
3307 | aeadctx->mayverify = VERIFY_HW; |
3308 | } else if (authsize == ICV_12) { |
3309 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; |
3310 | aeadctx->mayverify = VERIFY_HW; |
3311 | } else if (authsize == ICV_14) { |
3312 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; |
3313 | aeadctx->mayverify = VERIFY_HW; |
3314 | } else if (authsize == (maxauth >> 1)) { |
3315 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; |
3316 | aeadctx->mayverify = VERIFY_HW; |
3317 | } else if (authsize == maxauth) { |
3318 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; |
3319 | aeadctx->mayverify = VERIFY_HW; |
3320 | } else { |
3321 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; |
3322 | aeadctx->mayverify = VERIFY_SW; |
3323 | } |
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3325 | } |
3326 | |
3327 | |
3328 | static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) |
3329 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3331 | |
3332 | switch (authsize) { |
3333 | case ICV_4: |
3334 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; |
3335 | aeadctx->mayverify = VERIFY_HW; |
3336 | break; |
3337 | case ICV_8: |
3338 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; |
3339 | aeadctx->mayverify = VERIFY_HW; |
3340 | break; |
3341 | case ICV_12: |
3342 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; |
3343 | aeadctx->mayverify = VERIFY_HW; |
3344 | break; |
3345 | case ICV_14: |
3346 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; |
3347 | aeadctx->mayverify = VERIFY_HW; |
3348 | break; |
3349 | case ICV_16: |
3350 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; |
3351 | aeadctx->mayverify = VERIFY_HW; |
3352 | break; |
3353 | case ICV_13: |
3354 | case ICV_15: |
3355 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; |
3356 | aeadctx->mayverify = VERIFY_SW; |
3357 | break; |
3358 | default: |
3359 | return -EINVAL; |
3360 | } |
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3362 | } |
3363 | |
3364 | static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, |
3365 | unsigned int authsize) |
3366 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3368 | |
3369 | switch (authsize) { |
3370 | case ICV_8: |
3371 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; |
3372 | aeadctx->mayverify = VERIFY_HW; |
3373 | break; |
3374 | case ICV_12: |
3375 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; |
3376 | aeadctx->mayverify = VERIFY_HW; |
3377 | break; |
3378 | case ICV_16: |
3379 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; |
3380 | aeadctx->mayverify = VERIFY_HW; |
3381 | break; |
3382 | default: |
3383 | return -EINVAL; |
3384 | } |
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3386 | } |
3387 | |
3388 | static int chcr_ccm_setauthsize(struct crypto_aead *tfm, |
3389 | unsigned int authsize) |
3390 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3392 | |
3393 | switch (authsize) { |
3394 | case ICV_4: |
3395 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; |
3396 | aeadctx->mayverify = VERIFY_HW; |
3397 | break; |
3398 | case ICV_6: |
3399 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; |
3400 | aeadctx->mayverify = VERIFY_HW; |
3401 | break; |
3402 | case ICV_8: |
3403 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; |
3404 | aeadctx->mayverify = VERIFY_HW; |
3405 | break; |
3406 | case ICV_10: |
3407 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; |
3408 | aeadctx->mayverify = VERIFY_HW; |
3409 | break; |
3410 | case ICV_12: |
3411 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; |
3412 | aeadctx->mayverify = VERIFY_HW; |
3413 | break; |
3414 | case ICV_14: |
3415 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; |
3416 | aeadctx->mayverify = VERIFY_HW; |
3417 | break; |
3418 | case ICV_16: |
3419 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; |
3420 | aeadctx->mayverify = VERIFY_HW; |
3421 | break; |
3422 | default: |
3423 | return -EINVAL; |
3424 | } |
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3426 | } |
3427 | |
3428 | static int chcr_ccm_common_setkey(struct crypto_aead *aead, |
3429 | const u8 *key, |
3430 | unsigned int keylen) |
3431 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3433 | unsigned char ck_size, mk_size; |
3434 | int key_ctx_size = 0; |
3435 | |
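	/* The CCM key context carries the AES key twice, each copy rounded
	 * up to 16 bytes (CCM uses the same key for the CTR cipher and the
	 * CBC-MAC); see the double memcpy() in create_aead_ccm_wr().
	 */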
3436 | key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2; |
3437 | if (keylen == AES_KEYSIZE_128) { |
3438 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
3439 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; |
3440 | } else if (keylen == AES_KEYSIZE_192) { |
3441 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; |
3442 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; |
3443 | } else if (keylen == AES_KEYSIZE_256) { |
3444 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
3445 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; |
3446 | } else { |
3447 | aeadctx->enckey_len = 0; |
3448 | return -EINVAL; |
3449 | } |
3450 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0, |
3451 | key_ctx_size >> 4); |
3452 | memcpy(aeadctx->key, key, keylen); |
3453 | aeadctx->enckey_len = keylen; |
3454 | |
3455 | return 0; |
3456 | } |
3457 | |
3458 | static int chcr_aead_ccm_setkey(struct crypto_aead *aead, |
3459 | const u8 *key, |
3460 | unsigned int keylen) |
3461 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3463 | int error; |
3464 | |
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3469 | if (error) |
3470 | return error; |
3471 | return chcr_ccm_common_setkey(aead, key, keylen); |
3472 | } |
3473 | |
3474 | static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, |
3475 | unsigned int keylen) |
3476 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3478 | int error; |
3479 | |
3480 | if (keylen < 3) { |
3481 | aeadctx->enckey_len = 0; |
3482 | return -EINVAL; |
3483 | } |
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3488 | if (error) |
3489 | return error; |
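	/* RFC 4309 appends a 3-byte salt to the AES key; it supplies the
	 * leading bytes of the CCM nonce for every request.
	 */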
3490 | keylen -= 3; |
3491 | memcpy(aeadctx->salt, key + keylen, 3); |
3492 | return chcr_ccm_common_setkey(aead, key, keylen); |
3493 | } |
3494 | |
3495 | static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, |
3496 | unsigned int keylen) |
3497 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3500 | unsigned int ck_size; |
3501 | int ret = 0, key_ctx_size = 0; |
3502 | struct crypto_aes_ctx aes; |
3503 | |
3504 | aeadctx->enckey_len = 0; |
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3509 | if (ret) |
3510 | goto out; |
3511 | |
3512 | if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && |
3513 | keylen > 3) { |
3514 | keylen -= 4; /* nonce/salt is present in the last 4 bytes */ |
3515 | memcpy(aeadctx->salt, key + keylen, 4); |
3516 | } |
3517 | if (keylen == AES_KEYSIZE_128) { |
3518 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
3519 | } else if (keylen == AES_KEYSIZE_192) { |
3520 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; |
3521 | } else if (keylen == AES_KEYSIZE_256) { |
3522 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
3523 | } else { |
3524 | pr_err("GCM: Invalid key length %d\n" , keylen); |
3525 | ret = -EINVAL; |
3526 | goto out; |
3527 | } |
3528 | |
3529 | memcpy(aeadctx->key, key, keylen); |
3530 | aeadctx->enckey_len = keylen; |
3531 | key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) + |
3532 | AEAD_H_SIZE; |
3533 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, |
3534 | CHCR_KEYCTX_MAC_KEY_SIZE_128, |
3535 | 0, 0, |
3536 | key_ctx_size >> 4); |
	/* Compute the GHASH key H = CIPH(K, 0^128).
	 * It is stored in the key context after the AES key.
	 */
	ret = aes_expandkey(&aes, key, keylen);
3541 | if (ret) { |
3542 | aeadctx->enckey_len = 0; |
3543 | goto out; |
3544 | } |
3545 | memset(gctx->ghash_h, 0, AEAD_H_SIZE); |
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));
3548 | |
3549 | out: |
3550 | return ret; |
3551 | } |
3552 | |
3553 | static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, |
3554 | unsigned int keylen) |
3555 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* the key blob contains both the auth and cipher keys */
3559 | struct crypto_authenc_keys keys; |
3560 | unsigned int bs, subtype; |
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3562 | int err = 0, i, key_ctx_len = 0; |
3563 | unsigned char ck_size = 0; |
3564 | unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 }; |
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3566 | struct algo_param param; |
3567 | int align; |
3568 | u8 *o_ptr = NULL; |
3569 | |
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3574 | if (err) |
3575 | goto out; |
3576 | |
	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3578 | goto out; |
3579 | |
	if (get_alg_config(&param, max_authsize)) {
		pr_err("Unsupported digest size\n");
3582 | goto out; |
3583 | } |
	subtype = get_aead_subtype(authenc);
3585 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
3586 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { |
3587 | if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) |
3588 | goto out; |
3589 | memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen |
3590 | - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); |
3591 | keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; |
3592 | } |
3593 | if (keys.enckeylen == AES_KEYSIZE_128) { |
3594 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
3595 | } else if (keys.enckeylen == AES_KEYSIZE_192) { |
3596 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; |
3597 | } else if (keys.enckeylen == AES_KEYSIZE_256) { |
3598 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
3599 | } else { |
3600 | pr_err("Unsupported cipher key\n" ); |
3601 | goto out; |
3602 | } |
3603 | |
	/* Copy only the encryption key. The auth key is used below to
	 * generate h(ipad) and h(opad) and is not needed afterwards;
	 * authkeylen equals the hash digest size.
	 */
3608 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); |
3609 | aeadctx->enckey_len = keys.enckeylen; |
3610 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || |
3611 | subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { |
3612 | |
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
3615 | } |
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("Base driver cannot be loaded\n");
3619 | goto out; |
3620 | } |
3621 | { |
3622 | SHASH_DESC_ON_STACK(shash, base_hash); |
3623 | |
3624 | shash->tfm = base_hash; |
		bs = crypto_shash_blocksize(base_hash);
3626 | align = KEYCTX_ALIGN_PAD(max_authsize); |
3627 | o_ptr = actx->h_iopad + param.result_size + align; |
3628 | |
3629 | if (keys.authkeylen > bs) { |
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
3633 | if (err) { |
3634 | pr_err("Base driver cannot be loaded\n" ); |
3635 | goto out; |
3636 | } |
3637 | keys.authkeylen = max_authsize; |
3638 | } else |
3639 | memcpy(o_ptr, keys.authkey, keys.authkeylen); |
3640 | |
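		/*
		 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
		 * Precompute the partial hashes of (K ^ ipad) and
		 * (K ^ opad) so the hardware only has to continue the
		 * inner and outer hashes over the message.
		 */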
		/* Compute the ipad-digest */
3642 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); |
3643 | memcpy(pad, o_ptr, keys.authkeylen); |
3644 | for (i = 0; i < bs >> 2; i++) |
3645 | *((unsigned int *)pad + i) ^= IPAD_DATA; |
3646 | |
		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
3649 | goto out; |
3650 | /* Compute the opad-digest */ |
3651 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); |
3652 | memcpy(pad, o_ptr, keys.authkeylen); |
3653 | for (i = 0; i < bs >> 2; i++) |
3654 | *((unsigned int *)pad + i) ^= OPAD_DATA; |
3655 | |
		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3657 | goto out; |
3658 | |
3659 | /* convert the ipad and opad digest to network order */ |
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
3662 | key_ctx_len = sizeof(struct _key_ctx) + |
3663 | roundup(keys.enckeylen, 16) + |
3664 | (param.result_size + align) * 2; |
3665 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, |
3666 | 0, 1, key_ctx_len >> 4); |
3667 | actx->auth_mode = param.auth_mode; |
3668 | chcr_free_shash(base_hash); |
3669 | |
		memzero_explicit(&keys, sizeof(keys));
3671 | return 0; |
3672 | } |
3673 | out: |
3674 | aeadctx->enckey_len = 0; |
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
3677 | chcr_free_shash(base_hash); |
3678 | return -EINVAL; |
3679 | } |
3680 | |
3681 | static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, |
3682 | const u8 *key, unsigned int keylen) |
3683 | { |
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3686 | struct crypto_authenc_keys keys; |
3687 | int err; |
	/* the key blob contains both the auth and cipher keys */
3689 | unsigned int subtype; |
3690 | int key_ctx_len = 0; |
3691 | unsigned char ck_size = 0; |
3692 | |
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3697 | if (err) |
3698 | goto out; |
3699 | |
	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3701 | goto out; |
3702 | |
	subtype = get_aead_subtype(authenc);
3704 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
3705 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { |
3706 | if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) |
3707 | goto out; |
3708 | memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen |
3709 | - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); |
3710 | keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; |
3711 | } |
3712 | if (keys.enckeylen == AES_KEYSIZE_128) { |
3713 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
3714 | } else if (keys.enckeylen == AES_KEYSIZE_192) { |
3715 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; |
3716 | } else if (keys.enckeylen == AES_KEYSIZE_256) { |
3717 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
3718 | } else { |
3719 | pr_err("Unsupported cipher key %d\n" , keys.enckeylen); |
3720 | goto out; |
3721 | } |
3722 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); |
3723 | aeadctx->enckey_len = keys.enckeylen; |
3724 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || |
3725 | subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { |
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
3728 | } |
3729 | key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16); |
3730 | |
3731 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, |
3732 | 0, key_ctx_len >> 4); |
3733 | actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP; |
	memzero_explicit(&keys, sizeof(keys));
3735 | return 0; |
3736 | out: |
3737 | aeadctx->enckey_len = 0; |
	memzero_explicit(&keys, sizeof(keys));
3739 | return -EINVAL; |
3740 | } |
3741 | |
3742 | static int chcr_aead_op(struct aead_request *req, |
3743 | int size, |
3744 | create_wr_t create_wr_fn) |
3745 | { |
3746 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3747 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
3748 | struct chcr_context *ctx = a_ctx(tfm); |
3749 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
3750 | struct sk_buff *skb; |
3751 | struct chcr_dev *cdev; |
3752 | |
3753 | cdev = a_ctx(tfm)->dev; |
3754 | if (!cdev) { |
3755 | pr_err("%s : No crypto device.\n" , __func__); |
3756 | return -ENXIO; |
3757 | } |
3758 | |
	if (chcr_inc_wrcount(cdev)) {
		/* In the CHCR detach state the lldi/padap has been freed,
		 * so the fallback counter cannot be incremented here.
		 */
		return chcr_aead_fallback(req, reqctx->op);
3764 | } |
3765 | |
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   reqctx->txqidx) &&
3768 | (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) { |
		chcr_dec_wrcount(cdev);
3770 | return -ENOSPC; |
3771 | } |
3772 | |
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %d\n",
3776 | req->assoclen); |
3777 | return -EINVAL; |
3778 | } |
3779 | |
3780 | /* Form a WR from req */ |
3781 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size); |
3782 | |
	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
3786 | } |
3787 | |
3788 | skb->dev = u_ctx->lldi.ports[0]; |
3789 | set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); |
3790 | chcr_send_wr(skb); |
3791 | return -EINPROGRESS; |
3792 | } |
3793 | |
3794 | static int chcr_aead_encrypt(struct aead_request *req) |
3795 | { |
3796 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3797 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
3798 | struct chcr_context *ctx = a_ctx(tfm); |
3799 | unsigned int cpu; |
3800 | |
3801 | cpu = get_cpu(); |
3802 | reqctx->txqidx = cpu % ctx->ntxq; |
3803 | reqctx->rxqidx = cpu % ctx->nrxq; |
3804 | put_cpu(); |
3805 | |
3806 | reqctx->verify = VERIFY_HW; |
3807 | reqctx->op = CHCR_ENCRYPT_OP; |
3808 | |
	switch (get_aead_subtype(tfm)) {
3810 | case CRYPTO_ALG_SUB_TYPE_CTR_SHA: |
3811 | case CRYPTO_ALG_SUB_TYPE_CBC_SHA: |
3812 | case CRYPTO_ALG_SUB_TYPE_CBC_NULL: |
3813 | case CRYPTO_ALG_SUB_TYPE_CTR_NULL: |
		return chcr_aead_op(req, 0, create_authenc_wr);
3815 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: |
3816 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: |
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3818 | default: |
		return chcr_aead_op(req, 0, create_gcm_wr);
3820 | } |
3821 | } |
3822 | |
3823 | static int chcr_aead_decrypt(struct aead_request *req) |
3824 | { |
3825 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
3826 | struct chcr_context *ctx = a_ctx(tfm); |
3827 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
3828 | struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); |
3829 | int size; |
3830 | unsigned int cpu; |
3831 | |
3832 | cpu = get_cpu(); |
3833 | reqctx->txqidx = cpu % ctx->ntxq; |
3834 | reqctx->rxqidx = cpu % ctx->nrxq; |
3835 | put_cpu(); |
3836 | |
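	/* When the hardware cannot verify the configured tag length
	 * (VERIFY_SW), ask it for the full untruncated digest and verify
	 * the tag in software on completion.
	 */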
3837 | if (aeadctx->mayverify == VERIFY_SW) { |
		size = crypto_aead_maxauthsize(tfm);
3839 | reqctx->verify = VERIFY_SW; |
3840 | } else { |
3841 | size = 0; |
3842 | reqctx->verify = VERIFY_HW; |
3843 | } |
3844 | reqctx->op = CHCR_DECRYPT_OP; |
	switch (get_aead_subtype(tfm)) {
3846 | case CRYPTO_ALG_SUB_TYPE_CBC_SHA: |
3847 | case CRYPTO_ALG_SUB_TYPE_CTR_SHA: |
3848 | case CRYPTO_ALG_SUB_TYPE_CBC_NULL: |
3849 | case CRYPTO_ALG_SUB_TYPE_CTR_NULL: |
		return chcr_aead_op(req, size, create_authenc_wr);
3851 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: |
3852 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: |
		return chcr_aead_op(req, size, create_aead_ccm_wr);
3854 | default: |
		return chcr_aead_op(req, size, create_gcm_wr);
3856 | } |
3857 | } |
3858 | |
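/*
 * Template table of algorithms exposed through the kernel crypto API.
 * Common fields are filled in and the entries registered by
 * chcr_register_alg().
 */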
3859 | static struct chcr_alg_template driver_algs[] = { |
3860 | /* AES-CBC */ |
3861 | { |
3862 | .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC, |
3863 | .is_registered = 0, |
3864 | .alg.skcipher = { |
3865 | .base.cra_name = "cbc(aes)" , |
3866 | .base.cra_driver_name = "cbc-aes-chcr" , |
3867 | .base.cra_blocksize = AES_BLOCK_SIZE, |
3868 | |
3869 | .init = chcr_init_tfm, |
3870 | .exit = chcr_exit_tfm, |
3871 | .min_keysize = AES_MIN_KEY_SIZE, |
3872 | .max_keysize = AES_MAX_KEY_SIZE, |
3873 | .ivsize = AES_BLOCK_SIZE, |
3874 | .setkey = chcr_aes_cbc_setkey, |
3875 | .encrypt = chcr_aes_encrypt, |
3876 | .decrypt = chcr_aes_decrypt, |
3877 | } |
3878 | }, |
3879 | { |
3880 | .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, |
3881 | .is_registered = 0, |
3882 | .alg.skcipher = { |
3883 | .base.cra_name = "xts(aes)" , |
3884 | .base.cra_driver_name = "xts-aes-chcr" , |
3885 | .base.cra_blocksize = AES_BLOCK_SIZE, |
3886 | |
3887 | .init = chcr_init_tfm, |
3888 | .exit = chcr_exit_tfm, |
3889 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
3890 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
3891 | .ivsize = AES_BLOCK_SIZE, |
3892 | .setkey = chcr_aes_xts_setkey, |
3893 | .encrypt = chcr_aes_encrypt, |
3894 | .decrypt = chcr_aes_decrypt, |
3895 | } |
3896 | }, |
3897 | { |
3898 | .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, |
3899 | .is_registered = 0, |
3900 | .alg.skcipher = { |
3901 | .base.cra_name = "ctr(aes)" , |
3902 | .base.cra_driver_name = "ctr-aes-chcr" , |
3903 | .base.cra_blocksize = 1, |
3904 | |
3905 | .init = chcr_init_tfm, |
3906 | .exit = chcr_exit_tfm, |
3907 | .min_keysize = AES_MIN_KEY_SIZE, |
3908 | .max_keysize = AES_MAX_KEY_SIZE, |
3909 | .ivsize = AES_BLOCK_SIZE, |
3910 | .setkey = chcr_aes_ctr_setkey, |
3911 | .encrypt = chcr_aes_encrypt, |
3912 | .decrypt = chcr_aes_decrypt, |
3913 | } |
3914 | }, |
3915 | { |
3916 | .type = CRYPTO_ALG_TYPE_SKCIPHER | |
3917 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686, |
3918 | .is_registered = 0, |
3919 | .alg.skcipher = { |
3920 | .base.cra_name = "rfc3686(ctr(aes))" , |
3921 | .base.cra_driver_name = "rfc3686-ctr-aes-chcr" , |
3922 | .base.cra_blocksize = 1, |
3923 | |
3924 | .init = chcr_rfc3686_init, |
3925 | .exit = chcr_exit_tfm, |
3926 | .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, |
3927 | .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, |
3928 | .ivsize = CTR_RFC3686_IV_SIZE, |
3929 | .setkey = chcr_aes_rfc3686_setkey, |
3930 | .encrypt = chcr_aes_encrypt, |
3931 | .decrypt = chcr_aes_decrypt, |
3932 | } |
3933 | }, |
3934 | /* SHA */ |
3935 | { |
3936 | .type = CRYPTO_ALG_TYPE_AHASH, |
3937 | .is_registered = 0, |
3938 | .alg.hash = { |
3939 | .halg.digestsize = SHA1_DIGEST_SIZE, |
3940 | .halg.base = { |
3941 | .cra_name = "sha1" , |
3942 | .cra_driver_name = "sha1-chcr" , |
3943 | .cra_blocksize = SHA1_BLOCK_SIZE, |
3944 | } |
3945 | } |
3946 | }, |
3947 | { |
3948 | .type = CRYPTO_ALG_TYPE_AHASH, |
3949 | .is_registered = 0, |
3950 | .alg.hash = { |
3951 | .halg.digestsize = SHA256_DIGEST_SIZE, |
3952 | .halg.base = { |
3953 | .cra_name = "sha256" , |
3954 | .cra_driver_name = "sha256-chcr" , |
3955 | .cra_blocksize = SHA256_BLOCK_SIZE, |
3956 | } |
3957 | } |
3958 | }, |
3959 | { |
3960 | .type = CRYPTO_ALG_TYPE_AHASH, |
3961 | .is_registered = 0, |
3962 | .alg.hash = { |
3963 | .halg.digestsize = SHA224_DIGEST_SIZE, |
3964 | .halg.base = { |
3965 | .cra_name = "sha224" , |
3966 | .cra_driver_name = "sha224-chcr" , |
3967 | .cra_blocksize = SHA224_BLOCK_SIZE, |
3968 | } |
3969 | } |
3970 | }, |
3971 | { |
3972 | .type = CRYPTO_ALG_TYPE_AHASH, |
3973 | .is_registered = 0, |
3974 | .alg.hash = { |
3975 | .halg.digestsize = SHA384_DIGEST_SIZE, |
3976 | .halg.base = { |
3977 | .cra_name = "sha384" , |
3978 | .cra_driver_name = "sha384-chcr" , |
3979 | .cra_blocksize = SHA384_BLOCK_SIZE, |
3980 | } |
3981 | } |
3982 | }, |
3983 | { |
3984 | .type = CRYPTO_ALG_TYPE_AHASH, |
3985 | .is_registered = 0, |
3986 | .alg.hash = { |
3987 | .halg.digestsize = SHA512_DIGEST_SIZE, |
3988 | .halg.base = { |
3989 | .cra_name = "sha512" , |
3990 | .cra_driver_name = "sha512-chcr" , |
3991 | .cra_blocksize = SHA512_BLOCK_SIZE, |
3992 | } |
3993 | } |
3994 | }, |
3995 | /* HMAC */ |
3996 | { |
3997 | .type = CRYPTO_ALG_TYPE_HMAC, |
3998 | .is_registered = 0, |
3999 | .alg.hash = { |
4000 | .halg.digestsize = SHA1_DIGEST_SIZE, |
4001 | .halg.base = { |
4002 | .cra_name = "hmac(sha1)" , |
4003 | .cra_driver_name = "hmac-sha1-chcr" , |
4004 | .cra_blocksize = SHA1_BLOCK_SIZE, |
4005 | } |
4006 | } |
4007 | }, |
4008 | { |
4009 | .type = CRYPTO_ALG_TYPE_HMAC, |
4010 | .is_registered = 0, |
4011 | .alg.hash = { |
4012 | .halg.digestsize = SHA224_DIGEST_SIZE, |
4013 | .halg.base = { |
4014 | .cra_name = "hmac(sha224)" , |
4015 | .cra_driver_name = "hmac-sha224-chcr" , |
4016 | .cra_blocksize = SHA224_BLOCK_SIZE, |
4017 | } |
4018 | } |
4019 | }, |
4020 | { |
4021 | .type = CRYPTO_ALG_TYPE_HMAC, |
4022 | .is_registered = 0, |
4023 | .alg.hash = { |
4024 | .halg.digestsize = SHA256_DIGEST_SIZE, |
4025 | .halg.base = { |
4026 | .cra_name = "hmac(sha256)" , |
4027 | .cra_driver_name = "hmac-sha256-chcr" , |
4028 | .cra_blocksize = SHA256_BLOCK_SIZE, |
4029 | } |
4030 | } |
4031 | }, |
4032 | { |
4033 | .type = CRYPTO_ALG_TYPE_HMAC, |
4034 | .is_registered = 0, |
4035 | .alg.hash = { |
4036 | .halg.digestsize = SHA384_DIGEST_SIZE, |
4037 | .halg.base = { |
4038 | .cra_name = "hmac(sha384)" , |
4039 | .cra_driver_name = "hmac-sha384-chcr" , |
4040 | .cra_blocksize = SHA384_BLOCK_SIZE, |
4041 | } |
4042 | } |
4043 | }, |
4044 | { |
4045 | .type = CRYPTO_ALG_TYPE_HMAC, |
4046 | .is_registered = 0, |
4047 | .alg.hash = { |
4048 | .halg.digestsize = SHA512_DIGEST_SIZE, |
4049 | .halg.base = { |
4050 | .cra_name = "hmac(sha512)" , |
4051 | .cra_driver_name = "hmac-sha512-chcr" , |
4052 | .cra_blocksize = SHA512_BLOCK_SIZE, |
4053 | } |
4054 | } |
4055 | }, |
4056 | /* Add AEAD Algorithms */ |
4057 | { |
4058 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM, |
4059 | .is_registered = 0, |
4060 | .alg.aead = { |
4061 | .base = { |
4062 | .cra_name = "gcm(aes)" , |
4063 | .cra_driver_name = "gcm-aes-chcr" , |
4064 | .cra_blocksize = 1, |
4065 | .cra_priority = CHCR_AEAD_PRIORITY, |
4066 | .cra_ctxsize = sizeof(struct chcr_context) + |
4067 | sizeof(struct chcr_aead_ctx) + |
4068 | sizeof(struct chcr_gcm_ctx), |
4069 | }, |
4070 | .ivsize = GCM_AES_IV_SIZE, |
4071 | .maxauthsize = GHASH_DIGEST_SIZE, |
4072 | .setkey = chcr_gcm_setkey, |
4073 | .setauthsize = chcr_gcm_setauthsize, |
4074 | } |
4075 | }, |
4076 | { |
4077 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106, |
4078 | .is_registered = 0, |
4079 | .alg.aead = { |
4080 | .base = { |
4081 | .cra_name = "rfc4106(gcm(aes))" , |
4082 | .cra_driver_name = "rfc4106-gcm-aes-chcr" , |
4083 | .cra_blocksize = 1, |
4084 | .cra_priority = CHCR_AEAD_PRIORITY + 1, |
4085 | .cra_ctxsize = sizeof(struct chcr_context) + |
4086 | sizeof(struct chcr_aead_ctx) + |
4087 | sizeof(struct chcr_gcm_ctx), |
4088 | |
4089 | }, |
4090 | .ivsize = GCM_RFC4106_IV_SIZE, |
4091 | .maxauthsize = GHASH_DIGEST_SIZE, |
4092 | .setkey = chcr_gcm_setkey, |
4093 | .setauthsize = chcr_4106_4309_setauthsize, |
4094 | } |
4095 | }, |
4096 | { |
4097 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM, |
4098 | .is_registered = 0, |
4099 | .alg.aead = { |
4100 | .base = { |
4101 | .cra_name = "ccm(aes)" , |
4102 | .cra_driver_name = "ccm-aes-chcr" , |
4103 | .cra_blocksize = 1, |
4104 | .cra_priority = CHCR_AEAD_PRIORITY, |
4105 | .cra_ctxsize = sizeof(struct chcr_context) + |
4106 | sizeof(struct chcr_aead_ctx), |
4107 | |
4108 | }, |
4109 | .ivsize = AES_BLOCK_SIZE, |
4110 | .maxauthsize = GHASH_DIGEST_SIZE, |
4111 | .setkey = chcr_aead_ccm_setkey, |
4112 | .setauthsize = chcr_ccm_setauthsize, |
4113 | } |
4114 | }, |
4115 | { |
4116 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309, |
4117 | .is_registered = 0, |
4118 | .alg.aead = { |
4119 | .base = { |
4120 | .cra_name = "rfc4309(ccm(aes))" , |
4121 | .cra_driver_name = "rfc4309-ccm-aes-chcr" , |
4122 | .cra_blocksize = 1, |
4123 | .cra_priority = CHCR_AEAD_PRIORITY + 1, |
4124 | .cra_ctxsize = sizeof(struct chcr_context) + |
4125 | sizeof(struct chcr_aead_ctx), |
4126 | |
4127 | }, |
4128 | .ivsize = 8, |
4129 | .maxauthsize = GHASH_DIGEST_SIZE, |
4130 | .setkey = chcr_aead_rfc4309_setkey, |
4131 | .setauthsize = chcr_4106_4309_setauthsize, |
4132 | } |
4133 | }, |
4134 | { |
4135 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
4136 | .is_registered = 0, |
4137 | .alg.aead = { |
4138 | .base = { |
4139 | .cra_name = "authenc(hmac(sha1),cbc(aes))" , |
4140 | .cra_driver_name = |
4141 | "authenc-hmac-sha1-cbc-aes-chcr" , |
4142 | .cra_blocksize = AES_BLOCK_SIZE, |
4143 | .cra_priority = CHCR_AEAD_PRIORITY, |
4144 | .cra_ctxsize = sizeof(struct chcr_context) + |
4145 | sizeof(struct chcr_aead_ctx) + |
4146 | sizeof(struct chcr_authenc_ctx), |
4147 | |
4148 | }, |
4149 | .ivsize = AES_BLOCK_SIZE, |
4150 | .maxauthsize = SHA1_DIGEST_SIZE, |
4151 | .setkey = chcr_authenc_setkey, |
4152 | .setauthsize = chcr_authenc_setauthsize, |
4153 | } |
4154 | }, |
4155 | { |
4156 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
4157 | .is_registered = 0, |
4158 | .alg.aead = { |
4159 | .base = { |
4160 | |
4161 | .cra_name = "authenc(hmac(sha256),cbc(aes))" , |
4162 | .cra_driver_name = |
4163 | "authenc-hmac-sha256-cbc-aes-chcr" , |
4164 | .cra_blocksize = AES_BLOCK_SIZE, |
4165 | .cra_priority = CHCR_AEAD_PRIORITY, |
4166 | .cra_ctxsize = sizeof(struct chcr_context) + |
4167 | sizeof(struct chcr_aead_ctx) + |
4168 | sizeof(struct chcr_authenc_ctx), |
4169 | |
4170 | }, |
4171 | .ivsize = AES_BLOCK_SIZE, |
4172 | .maxauthsize = SHA256_DIGEST_SIZE, |
4173 | .setkey = chcr_authenc_setkey, |
4174 | .setauthsize = chcr_authenc_setauthsize, |
4175 | } |
4176 | }, |
4177 | { |
4178 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
4179 | .is_registered = 0, |
4180 | .alg.aead = { |
4181 | .base = { |
4182 | .cra_name = "authenc(hmac(sha224),cbc(aes))" , |
4183 | .cra_driver_name = |
4184 | "authenc-hmac-sha224-cbc-aes-chcr" , |
4185 | .cra_blocksize = AES_BLOCK_SIZE, |
4186 | .cra_priority = CHCR_AEAD_PRIORITY, |
4187 | .cra_ctxsize = sizeof(struct chcr_context) + |
4188 | sizeof(struct chcr_aead_ctx) + |
4189 | sizeof(struct chcr_authenc_ctx), |
4190 | }, |
4191 | .ivsize = AES_BLOCK_SIZE, |
4192 | .maxauthsize = SHA224_DIGEST_SIZE, |
4193 | .setkey = chcr_authenc_setkey, |
4194 | .setauthsize = chcr_authenc_setauthsize, |
4195 | } |
4196 | }, |
4197 | { |
4198 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
4199 | .is_registered = 0, |
4200 | .alg.aead = { |
4201 | .base = { |
4202 | .cra_name = "authenc(hmac(sha384),cbc(aes))" , |
4203 | .cra_driver_name = |
4204 | "authenc-hmac-sha384-cbc-aes-chcr" , |
4205 | .cra_blocksize = AES_BLOCK_SIZE, |
4206 | .cra_priority = CHCR_AEAD_PRIORITY, |
4207 | .cra_ctxsize = sizeof(struct chcr_context) + |
4208 | sizeof(struct chcr_aead_ctx) + |
4209 | sizeof(struct chcr_authenc_ctx), |
4210 | |
4211 | }, |
4212 | .ivsize = AES_BLOCK_SIZE, |
4213 | .maxauthsize = SHA384_DIGEST_SIZE, |
4214 | .setkey = chcr_authenc_setkey, |
4215 | .setauthsize = chcr_authenc_setauthsize, |
4216 | } |
4217 | }, |
4218 | { |
4219 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
4220 | .is_registered = 0, |
4221 | .alg.aead = { |
4222 | .base = { |
4223 | .cra_name = "authenc(hmac(sha512),cbc(aes))" , |
4224 | .cra_driver_name = |
4225 | "authenc-hmac-sha512-cbc-aes-chcr" , |
4226 | .cra_blocksize = AES_BLOCK_SIZE, |
4227 | .cra_priority = CHCR_AEAD_PRIORITY, |
4228 | .cra_ctxsize = sizeof(struct chcr_context) + |
4229 | sizeof(struct chcr_aead_ctx) + |
4230 | sizeof(struct chcr_authenc_ctx), |
4231 | |
4232 | }, |
4233 | .ivsize = AES_BLOCK_SIZE, |
4234 | .maxauthsize = SHA512_DIGEST_SIZE, |
4235 | .setkey = chcr_authenc_setkey, |
4236 | .setauthsize = chcr_authenc_setauthsize, |
4237 | } |
4238 | }, |
4239 | { |
4240 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL, |
4241 | .is_registered = 0, |
4242 | .alg.aead = { |
4243 | .base = { |
4244 | .cra_name = "authenc(digest_null,cbc(aes))" , |
4245 | .cra_driver_name = |
4246 | "authenc-digest_null-cbc-aes-chcr" , |
4247 | .cra_blocksize = AES_BLOCK_SIZE, |
4248 | .cra_priority = CHCR_AEAD_PRIORITY, |
4249 | .cra_ctxsize = sizeof(struct chcr_context) + |
4250 | sizeof(struct chcr_aead_ctx) + |
4251 | sizeof(struct chcr_authenc_ctx), |
4252 | |
4253 | }, |
4254 | .ivsize = AES_BLOCK_SIZE, |
4255 | .maxauthsize = 0, |
4256 | .setkey = chcr_aead_digest_null_setkey, |
4257 | .setauthsize = chcr_authenc_null_setauthsize, |
4258 | } |
4259 | }, |
4260 | { |
4261 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, |
4262 | .is_registered = 0, |
4263 | .alg.aead = { |
4264 | .base = { |
4265 | .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))" , |
4266 | .cra_driver_name = |
4267 | "authenc-hmac-sha1-rfc3686-ctr-aes-chcr" , |
4268 | .cra_blocksize = 1, |
4269 | .cra_priority = CHCR_AEAD_PRIORITY, |
4270 | .cra_ctxsize = sizeof(struct chcr_context) + |
4271 | sizeof(struct chcr_aead_ctx) + |
4272 | sizeof(struct chcr_authenc_ctx), |
4273 | |
4274 | }, |
4275 | .ivsize = CTR_RFC3686_IV_SIZE, |
4276 | .maxauthsize = SHA1_DIGEST_SIZE, |
4277 | .setkey = chcr_authenc_setkey, |
4278 | .setauthsize = chcr_authenc_setauthsize, |
4279 | } |
4280 | }, |
4281 | { |
4282 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, |
4283 | .is_registered = 0, |
4284 | .alg.aead = { |
4285 | .base = { |
4286 | |
4287 | .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))" , |
4288 | .cra_driver_name = |
4289 | "authenc-hmac-sha256-rfc3686-ctr-aes-chcr" , |
4290 | .cra_blocksize = 1, |
4291 | .cra_priority = CHCR_AEAD_PRIORITY, |
4292 | .cra_ctxsize = sizeof(struct chcr_context) + |
4293 | sizeof(struct chcr_aead_ctx) + |
4294 | sizeof(struct chcr_authenc_ctx), |
4295 | |
4296 | }, |
4297 | .ivsize = CTR_RFC3686_IV_SIZE, |
4298 | .maxauthsize = SHA256_DIGEST_SIZE, |
4299 | .setkey = chcr_authenc_setkey, |
4300 | .setauthsize = chcr_authenc_setauthsize, |
4301 | } |
4302 | }, |
4303 | { |
4304 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, |
4305 | .is_registered = 0, |
4306 | .alg.aead = { |
4307 | .base = { |
4308 | .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))" , |
4309 | .cra_driver_name = |
4310 | "authenc-hmac-sha224-rfc3686-ctr-aes-chcr" , |
4311 | .cra_blocksize = 1, |
4312 | .cra_priority = CHCR_AEAD_PRIORITY, |
4313 | .cra_ctxsize = sizeof(struct chcr_context) + |
4314 | sizeof(struct chcr_aead_ctx) + |
4315 | sizeof(struct chcr_authenc_ctx), |
4316 | }, |
4317 | .ivsize = CTR_RFC3686_IV_SIZE, |
4318 | .maxauthsize = SHA224_DIGEST_SIZE, |
4319 | .setkey = chcr_authenc_setkey, |
4320 | .setauthsize = chcr_authenc_setauthsize, |
4321 | } |
4322 | }, |
4323 | { |
4324 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, |
4325 | .is_registered = 0, |
4326 | .alg.aead = { |
4327 | .base = { |
4328 | .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))" , |
4329 | .cra_driver_name = |
4330 | "authenc-hmac-sha384-rfc3686-ctr-aes-chcr" , |
4331 | .cra_blocksize = 1, |
4332 | .cra_priority = CHCR_AEAD_PRIORITY, |
4333 | .cra_ctxsize = sizeof(struct chcr_context) + |
4334 | sizeof(struct chcr_aead_ctx) + |
4335 | sizeof(struct chcr_authenc_ctx), |
4336 | |
4337 | }, |
4338 | .ivsize = CTR_RFC3686_IV_SIZE, |
4339 | .maxauthsize = SHA384_DIGEST_SIZE, |
4340 | .setkey = chcr_authenc_setkey, |
4341 | .setauthsize = chcr_authenc_setauthsize, |
4342 | } |
4343 | }, |
4344 | { |
4345 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, |
4346 | .is_registered = 0, |
4347 | .alg.aead = { |
4348 | .base = { |
4349 | .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))" , |
4350 | .cra_driver_name = |
4351 | "authenc-hmac-sha512-rfc3686-ctr-aes-chcr" , |
4352 | .cra_blocksize = 1, |
4353 | .cra_priority = CHCR_AEAD_PRIORITY, |
4354 | .cra_ctxsize = sizeof(struct chcr_context) + |
4355 | sizeof(struct chcr_aead_ctx) + |
4356 | sizeof(struct chcr_authenc_ctx), |
4357 | |
4358 | }, |
4359 | .ivsize = CTR_RFC3686_IV_SIZE, |
4360 | .maxauthsize = SHA512_DIGEST_SIZE, |
4361 | .setkey = chcr_authenc_setkey, |
4362 | .setauthsize = chcr_authenc_setauthsize, |
4363 | } |
4364 | }, |
4365 | { |
4366 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL, |
4367 | .is_registered = 0, |
4368 | .alg.aead = { |
4369 | .base = { |
4370 | .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))" , |
4371 | .cra_driver_name = |
4372 | "authenc-digest_null-rfc3686-ctr-aes-chcr" , |
4373 | .cra_blocksize = 1, |
4374 | .cra_priority = CHCR_AEAD_PRIORITY, |
4375 | .cra_ctxsize = sizeof(struct chcr_context) + |
4376 | sizeof(struct chcr_aead_ctx) + |
4377 | sizeof(struct chcr_authenc_ctx), |
4378 | |
4379 | }, |
4380 | .ivsize = CTR_RFC3686_IV_SIZE, |
4381 | .maxauthsize = 0, |
4382 | .setkey = chcr_aead_digest_null_setkey, |
4383 | .setauthsize = chcr_authenc_null_setauthsize, |
4384 | } |
4385 | }, |
4386 | }; |
4387 | |
4388 | /* |
 * chcr_unregister_alg - Deregister crypto algorithms with the
 * kernel framework.
4391 | */ |
4392 | static int chcr_unregister_alg(void) |
4393 | { |
4394 | int i; |
4395 | |
4396 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
4397 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { |
4398 | case CRYPTO_ALG_TYPE_SKCIPHER: |
4399 | if (driver_algs[i].is_registered && refcount_read( |
4400 | r: &driver_algs[i].alg.skcipher.base.cra_refcnt) |
4401 | == 1) { |
4402 | crypto_unregister_skcipher( |
4403 | alg: &driver_algs[i].alg.skcipher); |
4404 | driver_algs[i].is_registered = 0; |
4405 | } |
4406 | break; |
4407 | case CRYPTO_ALG_TYPE_AEAD: |
4408 | if (driver_algs[i].is_registered && refcount_read( |
4409 | r: &driver_algs[i].alg.aead.base.cra_refcnt) == 1) { |
4410 | crypto_unregister_aead( |
4411 | alg: &driver_algs[i].alg.aead); |
4412 | driver_algs[i].is_registered = 0; |
4413 | } |
4414 | break; |
4415 | case CRYPTO_ALG_TYPE_AHASH: |
4416 | if (driver_algs[i].is_registered && refcount_read( |
4417 | r: &driver_algs[i].alg.hash.halg.base.cra_refcnt) |
4418 | == 1) { |
4419 | crypto_unregister_ahash( |
4420 | alg: &driver_algs[i].alg.hash); |
4421 | driver_algs[i].is_registered = 0; |
4422 | } |
4423 | break; |
4424 | } |
4425 | } |
4426 | return 0; |
4427 | } |
4428 | |
4429 | #define SZ_AHASH_CTX sizeof(struct chcr_context) |
4430 | #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx)) |
4431 | #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx) |
4432 | |
4433 | /* |
4434 | * chcr_register_alg - Register crypto algorithms with kernel framework. |
4435 | */ |
4436 | static int chcr_register_alg(void) |
4437 | { |
4438 | struct crypto_alg ai; |
4439 | struct ahash_alg *a_hash; |
4440 | int err = 0, i; |
4441 | char *name = NULL; |
4442 | |
4443 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
4444 | if (driver_algs[i].is_registered) |
4445 | continue; |
4446 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { |
4447 | case CRYPTO_ALG_TYPE_SKCIPHER: |
4448 | driver_algs[i].alg.skcipher.base.cra_priority = |
4449 | CHCR_CRA_PRIORITY; |
4450 | driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; |
4451 | driver_algs[i].alg.skcipher.base.cra_flags = |
4452 | CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | |
4453 | CRYPTO_ALG_ALLOCATES_MEMORY | |
4454 | CRYPTO_ALG_NEED_FALLBACK; |
4455 | driver_algs[i].alg.skcipher.base.cra_ctxsize = |
4456 | sizeof(struct chcr_context) + |
4457 | sizeof(struct ablk_ctx); |
4458 | driver_algs[i].alg.skcipher.base.cra_alignmask = 0; |
4459 | |
			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4461 | name = driver_algs[i].alg.skcipher.base.cra_driver_name; |
4462 | break; |
4463 | case CRYPTO_ALG_TYPE_AEAD: |
4464 | driver_algs[i].alg.aead.base.cra_flags = |
4465 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | |
4466 | CRYPTO_ALG_ALLOCATES_MEMORY; |
4467 | driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; |
4468 | driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; |
4469 | driver_algs[i].alg.aead.init = chcr_aead_cra_init; |
4470 | driver_algs[i].alg.aead.exit = chcr_aead_cra_exit; |
4471 | driver_algs[i].alg.aead.base.cra_module = THIS_MODULE; |
			err = crypto_register_aead(&driver_algs[i].alg.aead);
4473 | name = driver_algs[i].alg.aead.base.cra_driver_name; |
4474 | break; |
4475 | case CRYPTO_ALG_TYPE_AHASH: |
4476 | a_hash = &driver_algs[i].alg.hash; |
4477 | a_hash->update = chcr_ahash_update; |
4478 | a_hash->final = chcr_ahash_final; |
4479 | a_hash->finup = chcr_ahash_finup; |
4480 | a_hash->digest = chcr_ahash_digest; |
4481 | a_hash->export = chcr_ahash_export; |
4482 | a_hash->import = chcr_ahash_import; |
4483 | a_hash->halg.statesize = SZ_AHASH_REQ_CTX; |
4484 | a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; |
4485 | a_hash->halg.base.cra_module = THIS_MODULE; |
4486 | a_hash->halg.base.cra_flags = |
4487 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; |
4488 | a_hash->halg.base.cra_alignmask = 0; |
4489 | a_hash->halg.base.cra_exit = NULL; |
4490 | |
4491 | if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) { |
4492 | a_hash->halg.base.cra_init = chcr_hmac_cra_init; |
4493 | a_hash->halg.base.cra_exit = chcr_hmac_cra_exit; |
4494 | a_hash->init = chcr_hmac_init; |
4495 | a_hash->setkey = chcr_ahash_setkey; |
4496 | a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX; |
4497 | } else { |
4498 | a_hash->init = chcr_sha_init; |
4499 | a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX; |
4500 | a_hash->halg.base.cra_init = chcr_sha_cra_init; |
4501 | } |
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4503 | ai = driver_algs[i].alg.hash.halg.base; |
4504 | name = ai.cra_driver_name; |
4505 | break; |
4506 | } |
4507 | if (err) { |
4508 | pr_err("%s : Algorithm registration failed\n" , name); |
4509 | goto register_err; |
4510 | } else { |
4511 | driver_algs[i].is_registered = 1; |
4512 | } |
4513 | } |
4514 | return 0; |
4515 | |
4516 | register_err: |
4517 | chcr_unregister_alg(); |
4518 | return err; |
4519 | } |
4520 | |
4521 | /* |
4522 | * start_crypto - Register the crypto algorithms. |
 * This should be called once when the first device comes up. After this
 * the kernel will start calling driver APIs for crypto operations.
4525 | */ |
4526 | int start_crypto(void) |
4527 | { |
4528 | return chcr_register_alg(); |
4529 | } |
4530 | |
4531 | /* |
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once when the last device goes down. After this
 * the kernel will not call the driver APIs for crypto operations.
4535 | */ |
4536 | int stop_crypto(void) |
4537 | { |
4538 | chcr_unregister_alg(); |
4539 | return 0; |
4540 | } |
4541 | |