1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ |
3 | |
4 | #include <crypto/internal/aead.h> |
5 | #include <crypto/authenc.h> |
6 | #include <crypto/scatterwalk.h> |
7 | #include <linux/dmapool.h> |
8 | #include <linux/dma-mapping.h> |
9 | |
10 | #include "cc_buffer_mgr.h" |
11 | #include "cc_lli_defs.h" |
12 | #include "cc_cipher.h" |
13 | #include "cc_hash.h" |
14 | #include "cc_aead.h" |
15 | |
16 | union buffer_array_entry { |
17 | struct scatterlist *sgl; |
18 | dma_addr_t buffer_dma; |
19 | }; |
20 | |
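/*
 * struct buffer_array - accumulates the SG lists/buffers that will later be
 * rendered into a single MLLI table by cc_generate_mlli().
 */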
21 | struct buffer_array { |
22 | unsigned int num_of_buffers; |
23 | union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI]; |
24 | unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI]; |
25 | int nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; |
26 | int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI]; |
27 | bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI]; |
28 | u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; |
29 | }; |
30 | |
31 | static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type) |
32 | { |
33 | switch (type) { |
34 | case CC_DMA_BUF_NULL: |
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
42 | } |
43 | } |
44 | |
45 | /** |
46 | * cc_copy_mac() - Copy MAC to temporary location |
47 | * |
48 | * @dev: device object |
49 | * @req: aead request object |
50 | * @dir: [IN] copy from/to sgl |
51 | */ |
52 | static void cc_copy_mac(struct device *dev, struct aead_request *req, |
53 | enum cc_sg_cpy_direct dir) |
54 | { |
55 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
56 | u32 skip = req->assoclen + req->cryptlen; |
57 | |
	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
60 | } |
61 | |
62 | /** |
63 | * cc_get_sgl_nents() - Get scatterlist number of entries. |
64 | * |
65 | * @dev: Device object |
66 | * @sg_list: SG list |
67 | * @nbytes: [IN] Total SGL data bytes. |
68 | * @lbytes: [OUT] Returns the amount of bytes at the last entry |
69 | * |
70 | * Return: |
71 | * Number of entries in the scatterlist |
72 | */ |
73 | static unsigned int cc_get_sgl_nents(struct device *dev, |
74 | struct scatterlist *sg_list, |
75 | unsigned int nbytes, u32 *lbytes) |
76 | { |
77 | unsigned int nents = 0; |
78 | |
79 | *lbytes = 0; |
80 | |
81 | while (nbytes && sg_list) { |
82 | nents++; |
83 | /* get the number of bytes in the last entry */ |
84 | *lbytes = nbytes; |
85 | nbytes -= (sg_list->length > nbytes) ? |
86 | nbytes : sg_list->length; |
87 | sg_list = sg_next(sg_list); |
88 | } |
89 | |
	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
91 | return nents; |
92 | } |
93 | |
94 | /** |
95 | * cc_copy_sg_portion() - Copy scatter list data, |
96 | * from to_skip to end, to dest and vice versa |
97 | * |
98 | * @dev: Device object |
99 | * @dest: Buffer to copy to/from |
100 | * @sg: SG list |
101 | * @to_skip: Number of bytes to skip before copying |
102 | * @end: Offset of last byte to copy |
103 | * @direct: Transfer direction (true == from SG list to buffer, false == from |
104 | * buffer to SG list) |
105 | */ |
106 | void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, |
107 | u32 to_skip, u32 end, enum cc_sg_cpy_direct direct) |
108 | { |
109 | u32 nents; |
110 | |
	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
114 | } |
115 | |
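/**
 * cc_render_buff_to_mlli() - Render a contiguous DMA buffer into MLLI entries
 *
 * @dev: Device object
 * @buff_dma: DMA address of the buffer
 * @buff_size: Buffer size in bytes
 * @curr_nents: [IN/OUT] Running count of MLLI entries written so far
 * @mlli_entry_pp: [IN/OUT] Pointer to the next free MLLI entry
 *
 * Each LLI entry holds two 32-bit words: the buffer address (LLI_WORD0_OFFSET)
 * and its size (LLI_WORD1_OFFSET). Buffers larger than CC_MAX_MLLI_ENTRY_SIZE
 * are split across several entries.
 *
 * Return: 0 on success, -ENOMEM if MAX_NUM_OF_TOTAL_MLLI_ENTRIES would be
 * exceeded.
 */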
116 | static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma, |
117 | u32 buff_size, u32 *curr_nents, |
118 | u32 **mlli_entry_pp) |
119 | { |
120 | u32 *mlli_entry_p = *mlli_entry_pp; |
121 | u32 new_nents; |
122 | |
	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
128 | return -ENOMEM; |
129 | } |
130 | |
	/* handle a buffer longer than 64 KB */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
138 | buff_dma += CC_MAX_MLLI_ENTRY_SIZE; |
139 | buff_size -= CC_MAX_MLLI_ENTRY_SIZE; |
140 | mlli_entry_p = mlli_entry_p + 2; |
141 | (*curr_nents)++; |
142 | } |
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
149 | mlli_entry_p = mlli_entry_p + 2; |
150 | *mlli_entry_pp = mlli_entry_p; |
151 | (*curr_nents)++; |
152 | return 0; |
153 | } |
154 | |
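/**
 * cc_render_sg_to_mlli() - Render a mapped scatterlist into MLLI entries
 *
 * @dev: Device object
 * @sgl: DMA-mapped scatterlist
 * @sgl_data_len: Number of data bytes to render
 * @sgl_offset: Offset into the first SG entry at which the data starts
 * @curr_nents: [IN/OUT] Running count of MLLI entries written so far
 * @mlli_entry_pp: [IN/OUT] Pointer to the next free MLLI entry
 *
 * Return: 0 on success, or the error returned by cc_render_buff_to_mlli().
 */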
155 | static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl, |
156 | u32 sgl_data_len, u32 sgl_offset, |
157 | u32 *curr_nents, u32 **mlli_entry_pp) |
158 | { |
159 | struct scatterlist *curr_sgl = sgl; |
160 | u32 *mlli_entry_p = *mlli_entry_pp; |
161 | s32 rc = 0; |
162 | |
163 | for ( ; (curr_sgl && sgl_data_len); |
164 | curr_sgl = sg_next(curr_sgl)) { |
165 | u32 entry_data_len = |
166 | (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ? |
167 | sg_dma_len(curr_sgl) - sgl_offset : |
168 | sgl_data_len; |
169 | sgl_data_len -= entry_data_len; |
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
173 | if (rc) |
174 | return rc; |
175 | |
176 | sgl_offset = 0; |
177 | } |
178 | *mlli_entry_pp = mlli_entry_p; |
179 | return 0; |
180 | } |
181 | |
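/**
 * cc_generate_mlli() - Build the MLLI table for all accumulated buffers
 *
 * @dev: Device object
 * @sg_data: Array of buffers/SG lists collected via cc_add_sg_entry()
 * @mlli_params: [OUT] Receives the MLLI buffer (virtual/DMA address, length)
 * @flags: GFP flags for the DMA pool allocation
 *
 * Allocates an MLLI buffer from the driver's DMA pool and renders every
 * collected entry into it, updating the per-entry mlli_nents counters.
 *
 * Return: 0 on success, -ENOMEM on allocation or rendering failure.
 */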
182 | static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data, |
183 | struct mlli_params *mlli_params, gfp_t flags) |
184 | { |
185 | u32 *mlli_p; |
186 | u32 total_nents = 0, prev_total_nents = 0; |
187 | int rc = 0, i; |
188 | |
	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
190 | |
191 | /* Allocate memory from the pointed pool */ |
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
197 | rc = -ENOMEM; |
198 | goto build_mlli_exit; |
199 | } |
200 | /* Point to start of MLLI */ |
201 | mlli_p = mlli_params->mlli_virt_addr; |
202 | /* go over all SG's and link it to one MLLI table */ |
203 | for (i = 0; i < sg_data->num_of_buffers; i++) { |
204 | union buffer_array_entry *entry = &sg_data->entry[i]; |
205 | u32 tot_len = sg_data->total_data_len[i]; |
206 | u32 offset = sg_data->offset[i]; |
207 | |
		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
210 | if (rc) |
211 | return rc; |
212 | |
		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
218 | *sg_data->mlli_nents[i] += |
219 | (total_nents - prev_total_nents); |
220 | prev_total_nents = total_nents; |
221 | } |
222 | } |
223 | |
224 | /* Set MLLI size for the bypass operation */ |
225 | mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE); |
226 | |
	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
228 | mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr, |
229 | mlli_params->mlli_len); |
230 | |
231 | build_mlli_exit: |
232 | return rc; |
233 | } |
234 | |
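/**
 * cc_add_sg_entry() - Add an SG list to the buffer array for a later MLLI build
 *
 * @dev: Device object
 * @sgl_data: Buffer array being accumulated
 * @nents: Number of SG entries carrying the data
 * @sgl: The scatterlist to add
 * @data_len: Number of data bytes in @sgl
 * @data_offset: Offset into @sgl where the data starts
 * @is_last_table: True if this is the last table of the request
 * @mlli_nents: [OUT] Optional counter that will receive the number of MLLI
 *              entries generated for this SG list (may be NULL)
 */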
235 | static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, |
236 | unsigned int nents, struct scatterlist *sgl, |
237 | unsigned int data_len, unsigned int data_offset, |
238 | bool is_last_table, u32 *mlli_nents) |
239 | { |
240 | unsigned int index = sgl_data->num_of_buffers; |
241 | |
	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
243 | index, nents, sgl, data_len, is_last_table); |
244 | sgl_data->nents[index] = nents; |
245 | sgl_data->entry[index].sgl = sgl; |
246 | sgl_data->offset[index] = data_offset; |
247 | sgl_data->total_data_len[index] = data_len; |
248 | sgl_data->is_last[index] = is_last_table; |
249 | sgl_data->mlli_nents[index] = mlli_nents; |
250 | if (sgl_data->mlli_nents[index]) |
251 | *sgl_data->mlli_nents[index] = 0; |
252 | sgl_data->num_of_buffers++; |
253 | } |
254 | |
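/**
 * cc_map_sg() - Count and DMA-map a scatterlist
 *
 * @dev: Device object
 * @sg: The scatterlist to map
 * @nbytes: Number of data bytes in @sg
 * @direction: DMA direction
 * @nents: [OUT] Number of SG entries covering @nbytes
 * @max_sg_nents: Maximum number of fragments allowed
 * @lbytes: [OUT] Number of bytes in the last entry
 * @mapped_nents: [OUT] Number of entries returned by dma_map_sg()
 *
 * Return: 0 on success, -ENOMEM if there are too many fragments or the
 * mapping fails.
 */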
255 | static int cc_map_sg(struct device *dev, struct scatterlist *sg, |
256 | unsigned int nbytes, int direction, u32 *nents, |
257 | u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) |
258 | { |
259 | int ret = 0; |
260 | |
261 | if (!nbytes) { |
262 | *mapped_nents = 0; |
263 | *lbytes = 0; |
264 | *nents = 0; |
265 | return 0; |
266 | } |
267 | |
	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		*nents = 0;
273 | return -ENOMEM; |
274 | } |
275 | |
276 | ret = dma_map_sg(dev, sg, *nents, direction); |
277 | if (!ret) { |
278 | *nents = 0; |
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
280 | return -ENOMEM; |
281 | } |
282 | |
283 | *mapped_nents = ret; |
284 | |
285 | return 0; |
286 | } |
287 | |
288 | static int |
289 | cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx, |
290 | u8 *config_data, struct buffer_array *sg_data, |
291 | unsigned int assoclen) |
292 | { |
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
302 | &sg_dma_address(&areq_ctx->ccm_adata_sg), |
303 | sg_page(&areq_ctx->ccm_adata_sg), |
304 | sg_virt(&areq_ctx->ccm_adata_sg), |
305 | areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length); |
306 | /* prepare for case of MLLI */ |
307 | if (assoclen > 0) { |
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
311 | } |
312 | return 0; |
313 | } |
314 | |
315 | static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx, |
316 | u8 *curr_buff, u32 curr_buff_cnt, |
317 | struct buffer_array *sg_data) |
318 | { |
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
327 | &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg), |
328 | sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset, |
329 | areq_ctx->buff_sg->length); |
330 | areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; |
331 | areq_ctx->curr_sg = areq_ctx->buff_sg; |
332 | areq_ctx->in_nents = 0; |
333 | /* prepare for case of MLLI */ |
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
336 | return 0; |
337 | } |
338 | |
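/**
 * cc_unmap_cipher_request() - Unmap all DMA resources of a cipher request
 *
 * @dev: Device object
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: Size of the mapped IV, in bytes
 * @src: Source scatterlist
 * @dst: Destination scatterlist
 *
 * Releases the IV mapping, the MLLI buffer (if one was allocated) and the
 * src/dst scatterlist mappings created by cc_map_cipher_request().
 */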
339 | void cc_unmap_cipher_request(struct device *dev, void *ctx, |
340 | unsigned int ivsize, struct scatterlist *src, |
341 | struct scatterlist *dst) |
342 | { |
343 | struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx; |
344 | |
345 | if (req_ctx->gen_ctx.iv_dma_addr) { |
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
347 | &req_ctx->gen_ctx.iv_dma_addr, ivsize); |
348 | dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr, |
349 | ivsize, DMA_BIDIRECTIONAL); |
350 | } |
351 | /* Release pool */ |
352 | if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI && |
353 | req_ctx->mlli_params.mlli_virt_addr) { |
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
357 | } |
358 | |
359 | if (src != dst) { |
360 | dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); |
361 | dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); |
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
	} else {
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
367 | } |
368 | } |
369 | |
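/**
 * cc_map_cipher_request() - Map all DMA resources of a cipher request
 *
 * @drvdata: Driver private data
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: IV size in bytes
 * @nbytes: Data size in bytes
 * @info: IV buffer
 * @src: Source scatterlist
 * @dst: Destination scatterlist
 * @flags: GFP flags for memory allocations
 *
 * Maps the IV and the src/dst scatterlists and, when more than one fragment
 * is involved, builds an MLLI table describing them.
 *
 * Return: 0 on success, -ENOMEM on mapping or allocation failure.
 */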
370 | int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, |
371 | unsigned int ivsize, unsigned int nbytes, |
372 | void *info, struct scatterlist *src, |
373 | struct scatterlist *dst, gfp_t flags) |
374 | { |
375 | struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx; |
376 | struct mlli_params *mlli_params = &req_ctx->mlli_params; |
377 | struct device *dev = drvdata_to_dev(drvdata); |
378 | struct buffer_array sg_data; |
379 | u32 dummy = 0; |
380 | int rc = 0; |
381 | u32 mapped_nents = 0; |
382 | int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); |
383 | |
384 | req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; |
385 | mlli_params->curr_pool = NULL; |
386 | sg_data.num_of_buffers = 0; |
387 | |
388 | /* Map IV buffer */ |
389 | if (ivsize) { |
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
400 | } else { |
401 | req_ctx->gen_ctx.iv_dma_addr = 0; |
402 | } |
403 | |
404 | /* Map the src SGL */ |
	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
407 | if (rc) |
408 | goto cipher_exit; |
409 | if (mapped_nents > 1) |
410 | req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; |
411 | |
412 | if (src == dst) { |
413 | /* Handle inplace operation */ |
414 | if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { |
415 | req_ctx->out_nents = 0; |
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
419 | } |
420 | } else { |
421 | /* Map the dst sg */ |
		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
425 | if (rc) |
426 | goto cipher_exit; |
427 | if (mapped_nents > 1) |
428 | req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; |
429 | |
430 | if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { |
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
437 | } |
438 | } |
439 | |
440 | if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { |
441 | mlli_params->curr_pool = drvdata->mlli_buffs_pool; |
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
443 | if (rc) |
444 | goto cipher_exit; |
445 | } |
446 | |
	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
448 | cc_dma_buf_type(req_ctx->dma_buf_type)); |
449 | |
450 | return 0; |
451 | |
452 | cipher_exit: |
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
454 | return rc; |
455 | } |
456 | |
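/**
 * cc_unmap_aead_request() - Unmap all DMA resources of an AEAD request
 *
 * @dev: Device object
 * @req: AEAD request object
 *
 * Releases the MAC buffer, the GCM/CCM helper blocks, the IV, the MLLI
 * buffer and the src/dst scatterlist mappings created by
 * cc_map_aead_request(). On coherent platforms doing in-place decrypt, the
 * backed-up MAC is copied back into place.
 */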
457 | void cc_unmap_aead_request(struct device *dev, struct aead_request *req) |
458 | { |
459 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
460 | unsigned int hw_iv_size = areq_ctx->hw_iv_size; |
461 | struct cc_drvdata *drvdata = dev_get_drvdata(dev); |
462 | int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); |
463 | |
464 | if (areq_ctx->mac_buf_dma_addr) { |
465 | dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, |
466 | MAX_MAC_SIZE, DMA_BIDIRECTIONAL); |
467 | } |
468 | |
469 | if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { |
470 | if (areq_ctx->hkey_dma_addr) { |
471 | dma_unmap_single(dev, areq_ctx->hkey_dma_addr, |
472 | AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); |
473 | } |
474 | |
475 | if (areq_ctx->gcm_block_len_dma_addr) { |
476 | dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr, |
477 | AES_BLOCK_SIZE, DMA_TO_DEVICE); |
478 | } |
479 | |
480 | if (areq_ctx->gcm_iv_inc1_dma_addr) { |
481 | dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, |
482 | AES_BLOCK_SIZE, DMA_TO_DEVICE); |
483 | } |
484 | |
485 | if (areq_ctx->gcm_iv_inc2_dma_addr) { |
486 | dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, |
487 | AES_BLOCK_SIZE, DMA_TO_DEVICE); |
488 | } |
489 | } |
490 | |
491 | if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { |
492 | if (areq_ctx->ccm_iv0_dma_addr) { |
493 | dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, |
494 | AES_BLOCK_SIZE, DMA_TO_DEVICE); |
495 | } |
496 | |
497 | dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE); |
498 | } |
499 | if (areq_ctx->gen_ctx.iv_dma_addr) { |
500 | dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, |
501 | hw_iv_size, DMA_BIDIRECTIONAL); |
		kfree_sensitive(areq_ctx->gen_ctx.iv);
503 | } |
504 | |
505 | /* Release pool */ |
506 | if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || |
507 | areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && |
508 | (areq_ctx->mlli_params.mlli_virt_addr)) { |
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
515 | } |
516 | |
	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
525 | dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); |
526 | } |
527 | if (drvdata->coherent && |
528 | areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && |
529 | req->src == req->dst) { |
		/* copy back the MAC from the temporary location, to handle
		 * possible data overwrite caused by a cache-coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
535 | } |
536 | } |
537 | |
538 | static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize, |
539 | u32 last_entry_data_size) |
540 | { |
541 | return ((sgl_nents > 1) && (last_entry_data_size < authsize)); |
542 | } |
543 | |
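/**
 * cc_aead_chain_iv() - Copy and DMA-map the request IV
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Buffer array being accumulated (not used by this helper)
 * @is_last: Not used by this helper
 * @do_chain: Not used by this helper
 *
 * The IV is duplicated into a DMA-safe buffer and mapped bidirectionally.
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure.
 */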
544 | static int cc_aead_chain_iv(struct cc_drvdata *drvdata, |
545 | struct aead_request *req, |
546 | struct buffer_array *sg_data, |
547 | bool is_last, bool do_chain) |
548 | { |
549 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
550 | unsigned int hw_iv_size = areq_ctx->hw_iv_size; |
551 | struct device *dev = drvdata_to_dev(drvdata); |
	gfp_t flags = cc_gfp_flags(&req->base);
553 | int rc = 0; |
554 | |
555 | if (!req->iv) { |
556 | areq_ctx->gen_ctx.iv_dma_addr = 0; |
557 | areq_ctx->gen_ctx.iv = NULL; |
558 | goto chain_iv_exit; |
559 | } |
560 | |
	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
562 | if (!areq_ctx->gen_ctx.iv) |
563 | return -ENOMEM; |
564 | |
565 | areq_ctx->gen_ctx.iv_dma_addr = |
566 | dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size, |
567 | DMA_BIDIRECTIONAL); |
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
572 | areq_ctx->gen_ctx.iv = NULL; |
573 | rc = -ENOMEM; |
574 | goto chain_iv_exit; |
575 | } |
576 | |
	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
578 | hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr); |
579 | |
580 | chain_iv_exit: |
581 | return rc; |
582 | } |
583 | |
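/**
 * cc_aead_chain_assoc() - Prepare the associated data for DLLI/MLLI access
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Buffer array being accumulated
 * @is_last: True if this is the last table of the request
 * @do_chain: Force chaining the assoc data into the MLLI table
 *
 * Determines whether the associated data can be accessed as a single DLLI
 * entry or must go through an MLLI table, and adds it to @sg_data in the
 * latter case. CCM reserves one extra entry for its header configuration.
 *
 * Return: 0 on success, a negative errno on failure.
 */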
584 | static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, |
585 | struct aead_request *req, |
586 | struct buffer_array *sg_data, |
587 | bool is_last, bool do_chain) |
588 | { |
589 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
590 | int rc = 0; |
591 | int mapped_nents = 0; |
592 | struct device *dev = drvdata_to_dev(drvdata); |
593 | |
594 | if (!sg_data) { |
595 | rc = -EINVAL; |
596 | goto chain_assoc_exit; |
597 | } |
598 | |
599 | if (areq_ctx->assoclen == 0) { |
600 | areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; |
601 | areq_ctx->assoc.nents = 0; |
602 | areq_ctx->assoc.mlli_nents = 0; |
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
604 | cc_dma_buf_type(areq_ctx->assoc_buff_type), |
605 | areq_ctx->assoc.nents); |
606 | goto chain_assoc_exit; |
607 | } |
608 | |
	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
615 | mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); |
616 | return -ENOMEM; |
617 | } |
618 | areq_ctx->assoc.nents = mapped_nents; |
619 | |
620 | /* in CCM case we have additional entry for |
621 | * ccm header configurations |
622 | */ |
623 | if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { |
624 | if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) { |
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
626 | (areq_ctx->assoc.nents + 1), |
627 | LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); |
628 | rc = -ENOMEM; |
629 | goto chain_assoc_exit; |
630 | } |
631 | } |
632 | |
633 | if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null) |
634 | areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI; |
635 | else |
636 | areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; |
637 | |
638 | if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { |
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
645 | areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; |
646 | } |
647 | |
648 | chain_assoc_exit: |
649 | return rc; |
650 | } |
651 | |
652 | static void cc_prepare_aead_data_dlli(struct aead_request *req, |
653 | u32 *src_last_bytes, u32 *dst_last_bytes) |
654 | { |
655 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
656 | enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; |
657 | unsigned int authsize = areq_ctx->req_authsize; |
658 | struct scatterlist *sg; |
659 | ssize_t offset; |
660 | |
661 | areq_ctx->is_icv_fragmented = false; |
662 | |
663 | if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) { |
664 | sg = areq_ctx->src_sgl; |
665 | offset = *src_last_bytes - authsize; |
666 | } else { |
667 | sg = areq_ctx->dst_sgl; |
668 | offset = *dst_last_bytes - authsize; |
669 | } |
670 | |
671 | areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset; |
672 | areq_ctx->icv_virt_addr = sg_virt(sg) + offset; |
673 | } |
674 | |
675 | static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata, |
676 | struct aead_request *req, |
677 | struct buffer_array *sg_data, |
678 | u32 *src_last_bytes, u32 *dst_last_bytes, |
679 | bool is_last_table) |
680 | { |
681 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
682 | enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; |
683 | unsigned int authsize = areq_ctx->req_authsize; |
684 | struct device *dev = drvdata_to_dev(drvdata); |
685 | struct scatterlist *sg; |
686 | |
687 | if (req->src == req->dst) { |
688 | /*INPLACE*/ |
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
697 | |
698 | if (areq_ctx->is_icv_fragmented) { |
699 | /* Backup happens only when ICV is fragmented, ICV |
700 | * verification is made by CPU compare in order to |
701 | * simplify MAC verification upon request completion |
702 | */ |
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* On coherent platforms (e.g. ACP) the ICV
				 * was already copied for any in-place
				 * decrypt operation, so skip the copy here.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);
711 | |
712 | areq_ctx->icv_virt_addr = areq_ctx->backup_mac; |
713 | } else { |
714 | areq_ctx->icv_virt_addr = areq_ctx->mac_buf; |
715 | areq_ctx->icv_dma_addr = |
716 | areq_ctx->mac_buf_dma_addr; |
717 | } |
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case where the SG is not contiguous. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
725 | } |
726 | |
727 | } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { |
728 | /*NON-INPLACE and DECRYPT*/ |
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
748 | areq_ctx->icv_virt_addr = areq_ctx->backup_mac; |
749 | |
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case where the SG is not contiguous. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
757 | } |
758 | |
759 | } else { |
760 | /*NON-INPLACE and ENCRYPT*/ |
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);
773 | |
774 | if (!areq_ctx->is_icv_fragmented) { |
775 | sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]; |
776 | /* Contig. ICV */ |
777 | areq_ctx->icv_dma_addr = sg_dma_address(sg) + |
778 | (*dst_last_bytes - authsize); |
779 | areq_ctx->icv_virt_addr = sg_virt(sg) + |
780 | (*dst_last_bytes - authsize); |
781 | } else { |
782 | areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; |
783 | areq_ctx->icv_virt_addr = areq_ctx->mac_buf; |
784 | } |
785 | } |
786 | } |
787 | |
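/**
 * cc_aead_chain_data() - Prepare the cipher/plain text data for DMA
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 * @sg_data: Buffer array being accumulated
 * @is_last_table: True if this is the last table of the request
 * @do_chain: Force use of an MLLI table even for a single fragment
 *
 * Skips over the associated data in the src/dst scatterlists, maps the
 * destination when it differs from the source, and selects DLLI or MLLI
 * addressing for the data, including locating the ICV.
 *
 * Return: 0 on success, a negative errno on failure.
 */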
788 | static int cc_aead_chain_data(struct cc_drvdata *drvdata, |
789 | struct aead_request *req, |
790 | struct buffer_array *sg_data, |
791 | bool is_last_table, bool do_chain) |
792 | { |
793 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
794 | struct device *dev = drvdata_to_dev(drvdata); |
795 | enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; |
796 | unsigned int authsize = areq_ctx->req_authsize; |
797 | unsigned int src_last_bytes = 0, dst_last_bytes = 0; |
798 | int rc = 0; |
799 | u32 src_mapped_nents = 0, dst_mapped_nents = 0; |
800 | u32 offset = 0; |
801 | /* non-inplace mode */ |
802 | unsigned int size_for_map = req->assoclen + req->cryptlen; |
803 | u32 sg_index = 0; |
804 | u32 size_to_skip = req->assoclen; |
805 | struct scatterlist *sgl; |
806 | |
807 | offset = size_to_skip; |
808 | |
809 | if (!sg_data) |
810 | return -EINVAL; |
811 | |
812 | areq_ctx->src_sgl = req->src; |
813 | areq_ctx->dst_sgl = req->dst; |
814 | |
815 | size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? |
816 | authsize : 0; |
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
819 | sg_index = areq_ctx->src_sgl->length; |
820 | //check where the data starts |
821 | while (src_mapped_nents && (sg_index <= size_to_skip)) { |
822 | src_mapped_nents--; |
823 | offset -= areq_ctx->src_sgl->length; |
824 | sgl = sg_next(areq_ctx->src_sgl); |
825 | if (!sgl) |
826 | break; |
827 | areq_ctx->src_sgl = sgl; |
828 | sg_index += areq_ctx->src_sgl->length; |
829 | } |
830 | if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { |
		dev_err(dev, "Too many fragments. current %d max %d\n",
832 | src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); |
833 | return -ENOMEM; |
834 | } |
835 | |
836 | areq_ctx->src.nents = src_mapped_nents; |
837 | |
838 | areq_ctx->src_offset = offset; |
839 | |
840 | if (req->src != req->dst) { |
841 | size_for_map = req->assoclen + req->cryptlen; |
842 | |
843 | if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) |
844 | size_for_map += authsize; |
845 | else |
846 | size_for_map -= authsize; |
847 | |
		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
852 | if (rc) |
853 | goto chain_data_exit; |
854 | } |
855 | |
	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
858 | sg_index = areq_ctx->dst_sgl->length; |
859 | offset = size_to_skip; |
860 | |
861 | //check where the data starts |
862 | while (dst_mapped_nents && sg_index <= size_to_skip) { |
863 | dst_mapped_nents--; |
864 | offset -= areq_ctx->dst_sgl->length; |
865 | sgl = sg_next(areq_ctx->dst_sgl); |
866 | if (!sgl) |
867 | break; |
868 | areq_ctx->dst_sgl = sgl; |
869 | sg_index += areq_ctx->dst_sgl->length; |
870 | } |
871 | if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { |
		dev_err(dev, "Too many fragments. current %d max %d\n",
873 | dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); |
874 | return -ENOMEM; |
875 | } |
876 | areq_ctx->dst.nents = dst_mapped_nents; |
877 | areq_ctx->dst_offset = offset; |
878 | if (src_mapped_nents > 1 || |
879 | dst_mapped_nents > 1 || |
880 | do_chain) { |
881 | areq_ctx->data_buff_type = CC_DMA_BUF_MLLI; |
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
889 | } |
890 | |
891 | chain_data_exit: |
892 | return rc; |
893 | } |
894 | |
895 | static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata, |
896 | struct aead_request *req) |
897 | { |
898 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
899 | u32 curr_mlli_size = 0; |
900 | |
901 | if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { |
902 | areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr; |
903 | curr_mlli_size = areq_ctx->assoc.mlli_nents * |
904 | LLI_ENTRY_BYTE_SIZE; |
905 | } |
906 | |
907 | if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { |
		/* In the in-place case, dst nents equal src nents */
909 | if (req->src == req->dst) { |
910 | areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents; |
911 | areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + |
912 | curr_mlli_size; |
913 | areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr; |
914 | if (!areq_ctx->is_single_pass) |
915 | areq_ctx->assoc.mlli_nents += |
916 | areq_ctx->src.mlli_nents; |
917 | } else { |
918 | if (areq_ctx->gen_ctx.op_type == |
919 | DRV_CRYPTO_DIRECTION_DECRYPT) { |
920 | areq_ctx->src.sram_addr = |
921 | drvdata->mlli_sram_addr + |
922 | curr_mlli_size; |
923 | areq_ctx->dst.sram_addr = |
924 | areq_ctx->src.sram_addr + |
925 | areq_ctx->src.mlli_nents * |
926 | LLI_ENTRY_BYTE_SIZE; |
927 | if (!areq_ctx->is_single_pass) |
928 | areq_ctx->assoc.mlli_nents += |
929 | areq_ctx->src.mlli_nents; |
930 | } else { |
931 | areq_ctx->dst.sram_addr = |
932 | drvdata->mlli_sram_addr + |
933 | curr_mlli_size; |
934 | areq_ctx->src.sram_addr = |
935 | areq_ctx->dst.sram_addr + |
936 | areq_ctx->dst.mlli_nents * |
937 | LLI_ENTRY_BYTE_SIZE; |
938 | if (!areq_ctx->is_single_pass) |
939 | areq_ctx->assoc.mlli_nents += |
940 | areq_ctx->dst.mlli_nents; |
941 | } |
942 | } |
943 | } |
944 | } |
945 | |
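/**
 * cc_map_aead_request() - Map all DMA resources of an AEAD request
 *
 * @drvdata: Driver private data
 * @req: AEAD request object
 *
 * Maps the MAC buffer, the CCM/GCM helper blocks (when relevant), the IV
 * and the src/dst scatterlists, then builds the MLLI table(s) needed for
 * either the single-pass or the double-pass flow.
 *
 * Return: 0 on success, a negative errno on failure (all mappings are
 * released on error).
 */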
946 | int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) |
947 | { |
948 | struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); |
949 | struct mlli_params *mlli_params = &areq_ctx->mlli_params; |
950 | struct device *dev = drvdata_to_dev(drvdata); |
951 | struct buffer_array sg_data; |
952 | unsigned int authsize = areq_ctx->req_authsize; |
953 | int rc = 0; |
954 | dma_addr_t dma_addr; |
955 | u32 mapped_nents = 0; |
956 | u32 dummy = 0; /*used for the assoc data fragments */ |
957 | u32 size_to_map; |
	gfp_t flags = cc_gfp_flags(&req->base);
959 | |
960 | mlli_params->curr_pool = NULL; |
961 | sg_data.num_of_buffers = 0; |
962 | |
	/* copy the MAC to a temporary location to handle possible data
	 * overwrite caused by a cache-coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the cipher data size; in decrypt, remove the ICV */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			     req->cryptlen :
			     (req->cryptlen - authsize);
976 | |
977 | dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE, |
978 | DMA_BIDIRECTIONAL); |
979 | if (dma_mapping_error(dev, dma_addr)) { |
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
981 | MAX_MAC_SIZE, areq_ctx->mac_buf); |
982 | rc = -ENOMEM; |
983 | goto aead_map_failure; |
984 | } |
985 | areq_ctx->mac_buf_dma_addr = dma_addr; |
986 | |
987 | if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { |
988 | void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; |
989 | |
990 | dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE, |
991 | DMA_TO_DEVICE); |
992 | |
993 | if (dma_mapping_error(dev, dma_addr)) { |
			dev_err(dev, "Mapping ccm_config %u B at va=%pK for DMA failed\n",
995 | AES_BLOCK_SIZE, addr); |
996 | areq_ctx->ccm_iv0_dma_addr = 0; |
997 | rc = -ENOMEM; |
998 | goto aead_map_failure; |
999 | } |
1000 | areq_ctx->ccm_iv0_dma_addr = dma_addr; |
1001 | |
		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
1004 | if (rc) |
1005 | goto aead_map_failure; |
1006 | } |
1007 | |
1008 | if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { |
1009 | dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE, |
1010 | DMA_BIDIRECTIONAL); |
1011 | if (dma_mapping_error(dev, dma_addr)) { |
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1013 | AES_BLOCK_SIZE, areq_ctx->hkey); |
1014 | rc = -ENOMEM; |
1015 | goto aead_map_failure; |
1016 | } |
1017 | areq_ctx->hkey_dma_addr = dma_addr; |
1018 | |
1019 | dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block, |
1020 | AES_BLOCK_SIZE, DMA_TO_DEVICE); |
1021 | if (dma_mapping_error(dev, dma_addr)) { |
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1023 | AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); |
1024 | rc = -ENOMEM; |
1025 | goto aead_map_failure; |
1026 | } |
1027 | areq_ctx->gcm_block_len_dma_addr = dma_addr; |
1028 | |
1029 | dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1, |
1030 | AES_BLOCK_SIZE, DMA_TO_DEVICE); |
1031 | |
1032 | if (dma_mapping_error(dev, dma_addr)) { |
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1034 | AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1)); |
1035 | areq_ctx->gcm_iv_inc1_dma_addr = 0; |
1036 | rc = -ENOMEM; |
1037 | goto aead_map_failure; |
1038 | } |
1039 | areq_ctx->gcm_iv_inc1_dma_addr = dma_addr; |
1040 | |
1041 | dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2, |
1042 | AES_BLOCK_SIZE, DMA_TO_DEVICE); |
1043 | |
1044 | if (dma_mapping_error(dev, dma_addr)) { |
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1046 | AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2)); |
1047 | areq_ctx->gcm_iv_inc2_dma_addr = 0; |
1048 | rc = -ENOMEM; |
1049 | goto aead_map_failure; |
1050 | } |
1051 | areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; |
1052 | } |
1053 | |
1054 | size_to_map = req->cryptlen + req->assoclen; |
1055 | /* If we do in-place encryption, we also need the auth tag */ |
1056 | if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && |
1057 | (req->src == req->dst)) { |
1058 | size_to_map += authsize; |
1059 | } |
1060 | |
	rc = cc_map_sg(dev, req->src, size_to_map,
		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
1067 | if (rc) |
1068 | goto aead_map_failure; |
1069 | |
1070 | if (areq_ctx->is_single_pass) { |
1071 | /* |
1072 | * Create MLLI table for: |
1073 | * (1) Assoc. data |
1074 | * (2) Src/Dst SGLs |
1075 | * Note: IV is contg. buffer (not an SGL) |
1076 | */ |
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1084 | if (rc) |
1085 | goto aead_map_failure; |
1086 | } else { /* DOUBLE-PASS flow */ |
1087 | /* |
1088 | * Prepare MLLI table(s) in this order: |
1089 | * |
1090 | * If ENCRYPT/DECRYPT (inplace): |
1091 | * (1) MLLI table for assoc |
1092 | * (2) IV entry (chained right after end of assoc) |
1093 | * (3) MLLI for src/dst (inplace operation) |
1094 | * |
1095 | * If ENCRYPT (non-inplace) |
1096 | * (1) MLLI table for assoc |
1097 | * (2) IV entry (chained right after end of assoc) |
1098 | * (3) MLLI for dst |
1099 | * (4) MLLI for src |
1100 | * |
1101 | * If DECRYPT (non-inplace) |
1102 | * (1) MLLI table for assoc |
1103 | * (2) IV entry (chained right after end of assoc) |
1104 | * (3) MLLI for src |
1105 | * (4) MLLI for dst |
1106 | */ |
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1114 | if (rc) |
1115 | goto aead_map_failure; |
1116 | } |
1117 | |
	/* MLLI support - start building the MLLI table(s) according to the
	 * results above
	 */
1121 | if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || |
1122 | areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { |
1123 | mlli_params->curr_pool = drvdata->mlli_buffs_pool; |
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1125 | if (rc) |
1126 | goto aead_map_failure; |
1127 | |
1128 | cc_update_aead_mlli_nents(drvdata, req); |
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1133 | } |
1134 | return 0; |
1135 | |
1136 | aead_map_failure: |
1137 | cc_unmap_aead_request(dev, req); |
1138 | return rc; |
1139 | } |
1140 | |
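/**
 * cc_map_hash_request_final() - Map the data for a final/finup hash operation
 *
 * @drvdata: Driver private data
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source scatterlist
 * @nbytes: Number of data bytes in @src
 * @do_update: True if the remaining data should be hashed as well
 * @flags: GFP flags for memory allocations
 *
 * Maps the internally buffered residue (if any) and the new data, building
 * an MLLI table when more than one fragment is involved.
 *
 * Return: 0 on success, a negative errno on failure.
 */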
1141 | int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, |
1142 | struct scatterlist *src, unsigned int nbytes, |
1143 | bool do_update, gfp_t flags) |
1144 | { |
1145 | struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; |
1146 | struct device *dev = drvdata_to_dev(drvdata); |
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1149 | struct mlli_params *mlli_params = &areq_ctx->mlli_params; |
1150 | struct buffer_array sg_data; |
1151 | int rc = 0; |
1152 | u32 dummy = 0; |
1153 | u32 mapped_nents = 0; |
1154 | |
	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1156 | curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); |
1157 | /* Init the type of the dma buffer */ |
1158 | areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; |
1159 | mlli_params->curr_pool = NULL; |
1160 | sg_data.num_of_buffers = 0; |
1161 | areq_ctx->in_nents = 0; |
1162 | |
1163 | if (nbytes == 0 && *curr_buff_cnt == 0) { |
1164 | /* nothing to do */ |
1165 | return 0; |
1166 | } |
1167 | |
1168 | /* map the previous buffer */ |
1169 | if (*curr_buff_cnt) { |
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
1172 | if (rc) |
1173 | return rc; |
1174 | } |
1175 | |
1176 | if (src && nbytes > 0 && do_update) { |
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
1180 | if (rc) |
1181 | goto unmap_curr_buff; |
1182 | if (src && mapped_nents == 1 && |
1183 | areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { |
1184 | memcpy(areq_ctx->buff_sg, src, |
1185 | sizeof(struct scatterlist)); |
1186 | areq_ctx->buff_sg->length = nbytes; |
1187 | areq_ctx->curr_sg = areq_ctx->buff_sg; |
1188 | areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; |
1189 | } else { |
1190 | areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; |
1191 | } |
1192 | } |
1193 | |
	/* build MLLI */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1201 | if (rc) |
1202 | goto fail_unmap_din; |
1203 | } |
1204 | /* change the buffer index for the unmap function */ |
1205 | areq_ctx->buff_index = (areq_ctx->buff_index ^ 1); |
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1207 | cc_dma_buf_type(areq_ctx->data_dma_buf_type)); |
1208 | return 0; |
1209 | |
1210 | fail_unmap_din: |
1211 | dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); |
1212 | |
1213 | unmap_curr_buff: |
1214 | if (*curr_buff_cnt) |
1215 | dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); |
1216 | |
1217 | return rc; |
1218 | } |
1219 | |
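/**
 * cc_map_hash_request_update() - Map the data for a hash update operation
 *
 * @drvdata: Driver private data
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source scatterlist
 * @nbytes: Number of data bytes in @src
 * @block_size: Hash block size; only whole blocks are processed now
 * @flags: GFP flags for memory allocations
 *
 * Data that does not fill a whole block is copied into the "next" buffer
 * for a later call; the rest is mapped for DMA (DLLI or MLLI).
 *
 * Return: 0 if DMA processing is needed, 1 if all data was buffered,
 * a negative errno on failure.
 */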
1220 | int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, |
1221 | struct scatterlist *src, unsigned int nbytes, |
1222 | unsigned int block_size, gfp_t flags) |
1223 | { |
1224 | struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; |
1225 | struct device *dev = drvdata_to_dev(drvdata); |
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1230 | struct mlli_params *mlli_params = &areq_ctx->mlli_params; |
1231 | unsigned int update_data_len; |
1232 | u32 total_in_len = nbytes + *curr_buff_cnt; |
1233 | struct buffer_array sg_data; |
1234 | unsigned int swap_index = 0; |
1235 | int rc = 0; |
1236 | u32 dummy = 0; |
1237 | u32 mapped_nents = 0; |
1238 | |
	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1240 | curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); |
1241 | /* Init the type of the dma buffer */ |
1242 | areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; |
1243 | mlli_params->curr_pool = NULL; |
1244 | areq_ctx->curr_sg = NULL; |
1245 | sg_data.num_of_buffers = 0; |
1246 | areq_ctx->in_nents = 0; |
1247 | |
1248 | if (total_in_len < block_size) { |
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
1254 | *curr_buff_cnt += nbytes; |
1255 | return 1; |
1256 | } |
1257 | |
1258 | /* Calculate the residue size*/ |
1259 | *next_buff_cnt = total_in_len & (block_size - 1); |
1260 | /* update data len */ |
1261 | update_data_len = total_in_len - *next_buff_cnt; |
1262 | |
	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1264 | *next_buff_cnt, update_data_len); |
1265 | |
1266 | /* Copy the new residue to next buffer */ |
1267 | if (*next_buff_cnt) { |
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
1274 | /* change the buffer index for next operation */ |
1275 | swap_index = 1; |
1276 | } |
1277 | |
1278 | if (*curr_buff_cnt) { |
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
1281 | if (rc) |
1282 | return rc; |
1283 | /* change the buffer index for next operation */ |
1284 | swap_index = 1; |
1285 | } |
1286 | |
1287 | if (update_data_len > *curr_buff_cnt) { |
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
1292 | if (rc) |
1293 | goto unmap_curr_buff; |
1294 | if (mapped_nents == 1 && |
1295 | areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { |
1296 | /* only one entry in the SG and no previous data */ |
1297 | memcpy(areq_ctx->buff_sg, src, |
1298 | sizeof(struct scatterlist)); |
1299 | areq_ctx->buff_sg->length = update_data_len; |
1300 | areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; |
1301 | areq_ctx->curr_sg = areq_ctx->buff_sg; |
1302 | } else { |
1303 | areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; |
1304 | } |
1305 | } |
1306 | |
1307 | if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { |
1308 | mlli_params->curr_pool = drvdata->mlli_buffs_pool; |
1309 | /* add the src data to the sg_data */ |
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1314 | if (rc) |
1315 | goto fail_unmap_din; |
1316 | } |
1317 | areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); |
1318 | |
1319 | return 0; |
1320 | |
1321 | fail_unmap_din: |
1322 | dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); |
1323 | |
1324 | unmap_curr_buff: |
1325 | if (*curr_buff_cnt) |
1326 | dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); |
1327 | |
1328 | return rc; |
1329 | } |
1330 | |
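/**
 * cc_unmap_hash_request() - Unmap all DMA resources of a hash request
 *
 * @dev: Device object
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source scatterlist
 * @do_revert: True to roll back the buffer-index swap instead of clearing
 *             the previous-data length
 */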
1331 | void cc_unmap_hash_request(struct device *dev, void *ctx, |
1332 | struct scatterlist *src, bool do_revert) |
1333 | { |
1334 | struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; |
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
1347 | } |
1348 | |
1349 | if (src && areq_ctx->in_nents) { |
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1351 | sg_virt(src), &sg_dma_address(src), sg_dma_len(src)); |
1352 | dma_unmap_sg(dev, src, |
1353 | areq_ctx->in_nents, DMA_TO_DEVICE); |
1354 | } |
1355 | |
1356 | if (*prev_len) { |
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1358 | sg_virt(areq_ctx->buff_sg), |
1359 | &sg_dma_address(areq_ctx->buff_sg), |
1360 | sg_dma_len(areq_ctx->buff_sg)); |
1361 | dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); |
1362 | if (!do_revert) { |
1363 | /* clean the previous data length for update |
1364 | * operation |
1365 | */ |
1366 | *prev_len = 0; |
1367 | } else { |
1368 | areq_ctx->buff_index ^= 1; |
1369 | } |
1370 | } |
1371 | } |
1372 | |
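/**
 * cc_buffer_mgr_init() - Create the DMA pool used for MLLI tables
 *
 * @drvdata: Driver private data
 *
 * Likely called once during driver initialization; cc_buffer_mgr_fini()
 * releases the pool.
 *
 * Return: 0 on success, -ENOMEM if the pool cannot be created.
 */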
1373 | int cc_buffer_mgr_init(struct cc_drvdata *drvdata) |
1374 | { |
1375 | struct device *dev = drvdata_to_dev(drvdata); |
1376 | |
	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);
1382 | |
1383 | if (!drvdata->mlli_buffs_pool) |
1384 | return -ENOMEM; |
1385 | |
1386 | return 0; |
1387 | } |
1388 | |
1389 | int cc_buffer_mgr_fini(struct cc_drvdata *drvdata) |
1390 | { |
	dma_pool_destroy(drvdata->mlli_buffs_pool);
1392 | return 0; |
1393 | } |
1394 | |