1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Cryptographic API. |
4 | * |
5 | * Support for ATMEL DES/TDES HW acceleration. |
6 | * |
7 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL |
8 | * Author: Nicolas Royer <nicolas@eukrea.com> |
9 | * |
10 | * Some ideas are from omap-aes.c drivers. |
11 | */ |
12 | |
13 | |
14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/err.h> |
18 | #include <linux/clk.h> |
19 | #include <linux/io.h> |
20 | #include <linux/hw_random.h> |
21 | #include <linux/platform_device.h> |
22 | |
23 | #include <linux/device.h> |
24 | #include <linux/dmaengine.h> |
25 | #include <linux/init.h> |
26 | #include <linux/errno.h> |
27 | #include <linux/interrupt.h> |
28 | #include <linux/irq.h> |
29 | #include <linux/scatterlist.h> |
30 | #include <linux/dma-mapping.h> |
31 | #include <linux/mod_devicetable.h> |
32 | #include <linux/delay.h> |
33 | #include <linux/crypto.h> |
34 | #include <crypto/scatterwalk.h> |
35 | #include <crypto/algapi.h> |
36 | #include <crypto/internal/des.h> |
37 | #include <crypto/internal/skcipher.h> |
38 | #include "atmel-tdes-regs.h" |
39 | |
40 | #define ATMEL_TDES_PRIORITY 300 |
41 | |
42 | /* TDES flags */ |
/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
44 | #define TDES_FLAGS_ENCRYPT TDES_MR_CYPHER_ENC |
45 | #define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK) |
46 | #define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB |
47 | #define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC |
48 | |
49 | #define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT) |
50 | |
51 | #define TDES_FLAGS_INIT BIT(3) |
52 | #define TDES_FLAGS_FAST BIT(4) |
53 | #define TDES_FLAGS_BUSY BIT(5) |
54 | #define TDES_FLAGS_DMA BIT(6) |
55 | |
56 | #define ATMEL_TDES_QUEUE_LENGTH 50 |
57 | |
58 | struct atmel_tdes_caps { |
59 | bool has_dma; |
60 | }; |
61 | |
62 | struct atmel_tdes_dev; |
63 | |
64 | struct atmel_tdes_ctx { |
65 | struct atmel_tdes_dev *dd; |
66 | |
67 | int keylen; |
68 | u32 key[DES3_EDE_KEY_SIZE / sizeof(u32)]; |
69 | unsigned long flags; |
70 | |
71 | u16 block_size; |
72 | }; |
73 | |
74 | struct atmel_tdes_reqctx { |
75 | unsigned long mode; |
76 | u8 lastc[DES_BLOCK_SIZE]; |
77 | }; |
78 | |
79 | struct atmel_tdes_dma { |
80 | struct dma_chan *chan; |
81 | struct dma_slave_config dma_conf; |
82 | }; |
83 | |
84 | struct atmel_tdes_dev { |
85 | struct list_head list; |
86 | unsigned long phys_base; |
87 | void __iomem *io_base; |
88 | |
89 | struct atmel_tdes_ctx *ctx; |
90 | struct device *dev; |
91 | struct clk *iclk; |
92 | int irq; |
93 | |
94 | unsigned long flags; |
95 | |
96 | spinlock_t lock; |
97 | struct crypto_queue queue; |
98 | |
99 | struct tasklet_struct done_task; |
100 | struct tasklet_struct queue_task; |
101 | |
102 | struct skcipher_request *req; |
103 | size_t total; |
104 | |
105 | struct scatterlist *in_sg; |
106 | unsigned int nb_in_sg; |
107 | size_t in_offset; |
108 | struct scatterlist *out_sg; |
109 | unsigned int nb_out_sg; |
110 | size_t out_offset; |
111 | |
112 | size_t buflen; |
113 | size_t dma_size; |
114 | |
115 | void *buf_in; |
116 | int dma_in; |
117 | dma_addr_t dma_addr_in; |
118 | struct atmel_tdes_dma dma_lch_in; |
119 | |
120 | void *buf_out; |
121 | int dma_out; |
122 | dma_addr_t dma_addr_out; |
123 | struct atmel_tdes_dma dma_lch_out; |
124 | |
125 | struct atmel_tdes_caps caps; |
126 | |
127 | u32 hw_version; |
128 | }; |
129 | |
130 | struct atmel_tdes_drv { |
131 | struct list_head dev_list; |
132 | spinlock_t lock; |
133 | }; |
134 | |
135 | static struct atmel_tdes_drv atmel_tdes = { |
136 | .dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list), |
137 | .lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock), |
138 | }; |
139 | |
140 | static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset, |
141 | void *buf, size_t buflen, size_t total, int out) |
142 | { |
143 | size_t count, off = 0; |
144 | |
145 | while (buflen && total) { |
146 | count = min((*sg)->length - *offset, total); |
147 | count = min(count, buflen); |
148 | |
149 | if (!count) |
150 | return off; |
151 | |
152 | scatterwalk_map_and_copy(buf: buf + off, sg: *sg, start: *offset, nbytes: count, out); |
153 | |
154 | off += count; |
155 | buflen -= count; |
156 | *offset += count; |
157 | total -= count; |
158 | |
159 | if (*offset == (*sg)->length) { |
160 | *sg = sg_next(*sg); |
161 | if (*sg) |
162 | *offset = 0; |
163 | else |
164 | total = 0; |
165 | } |
166 | } |
167 | |
168 | return off; |
169 | } |
170 | |
171 | static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset) |
172 | { |
173 | return readl_relaxed(dd->io_base + offset); |
174 | } |
175 | |
176 | static inline void atmel_tdes_write(struct atmel_tdes_dev *dd, |
177 | u32 offset, u32 value) |
178 | { |
179 | writel_relaxed(value, dd->io_base + offset); |
180 | } |
181 | |
182 | static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset, |
183 | const u32 *value, int count) |
184 | { |
185 | for (; count--; value++, offset += 4) |
186 | atmel_tdes_write(dd, offset, value: *value); |
187 | } |
188 | |
189 | static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void) |
190 | { |
191 | struct atmel_tdes_dev *tdes_dd; |
192 | |
193 | spin_lock_bh(lock: &atmel_tdes.lock); |
194 | /* One TDES IP per SoC. */ |
195 | tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list, |
196 | struct atmel_tdes_dev, list); |
197 | spin_unlock_bh(lock: &atmel_tdes.lock); |
198 | return tdes_dd; |
199 | } |
200 | |
201 | static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd) |
202 | { |
203 | int err; |
204 | |
205 | err = clk_prepare_enable(clk: dd->iclk); |
206 | if (err) |
207 | return err; |
208 | |
209 | if (!(dd->flags & TDES_FLAGS_INIT)) { |
210 | atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST); |
211 | dd->flags |= TDES_FLAGS_INIT; |
212 | } |
213 | |
214 | return 0; |
215 | } |
216 | |
217 | static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd) |
218 | { |
219 | return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff; |
220 | } |
221 | |
222 | static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd) |
223 | { |
224 | int err; |
225 | |
226 | err = atmel_tdes_hw_init(dd); |
227 | if (err) |
228 | return err; |
229 | |
230 | dd->hw_version = atmel_tdes_get_version(dd); |
231 | |
232 | dev_info(dd->dev, |
233 | "version: 0x%x\n" , dd->hw_version); |
234 | |
235 | clk_disable_unprepare(clk: dd->iclk); |
236 | |
237 | return 0; |
238 | } |
239 | |
240 | static void atmel_tdes_dma_callback(void *data) |
241 | { |
242 | struct atmel_tdes_dev *dd = data; |
243 | |
244 | /* dma_lch_out - completed */ |
245 | tasklet_schedule(t: &dd->done_task); |
246 | } |
247 | |
248 | static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) |
249 | { |
250 | int err; |
251 | u32 valmr = TDES_MR_SMOD_PDC; |
252 | |
253 | err = atmel_tdes_hw_init(dd); |
254 | |
255 | if (err) |
256 | return err; |
257 | |
258 | if (!dd->caps.has_dma) |
259 | atmel_tdes_write(dd, TDES_PTCR, |
260 | TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS); |
261 | |
262 | /* MR register must be set before IV registers */ |
263 | if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) { |
264 | valmr |= TDES_MR_KEYMOD_3KEY; |
265 | valmr |= TDES_MR_TDESMOD_TDES; |
266 | } else if (dd->ctx->keylen > DES_KEY_SIZE) { |
267 | valmr |= TDES_MR_KEYMOD_2KEY; |
268 | valmr |= TDES_MR_TDESMOD_TDES; |
269 | } else { |
270 | valmr |= TDES_MR_TDESMOD_DES; |
271 | } |
272 | |
273 | valmr |= dd->flags & TDES_FLAGS_MODE_MASK; |
274 | |
275 | atmel_tdes_write(dd, TDES_MR, value: valmr); |
276 | |
277 | atmel_tdes_write_n(dd, TDES_KEY1W1R, value: dd->ctx->key, |
278 | count: dd->ctx->keylen >> 2); |
279 | |
280 | if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB) |
281 | atmel_tdes_write_n(dd, TDES_IV1R, value: (void *)dd->req->iv, count: 2); |
282 | |
283 | return 0; |
284 | } |
285 | |
286 | static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd) |
287 | { |
288 | int err = 0; |
289 | size_t count; |
290 | |
291 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); |
292 | |
293 | if (dd->flags & TDES_FLAGS_FAST) { |
294 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); |
295 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); |
296 | } else { |
297 | dma_sync_single_for_device(dev: dd->dev, addr: dd->dma_addr_out, |
298 | size: dd->dma_size, dir: DMA_FROM_DEVICE); |
299 | |
300 | /* copy data */ |
301 | count = atmel_tdes_sg_copy(sg: &dd->out_sg, offset: &dd->out_offset, |
302 | buf: dd->buf_out, buflen: dd->buflen, total: dd->dma_size, out: 1); |
303 | if (count != dd->dma_size) { |
304 | err = -EINVAL; |
305 | dev_dbg(dd->dev, "not all data converted: %zu\n" , count); |
306 | } |
307 | } |
308 | |
309 | return err; |
310 | } |
311 | |
312 | static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd) |
313 | { |
314 | int err = -ENOMEM; |
315 | |
316 | dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, order: 0); |
317 | dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, order: 0); |
318 | dd->buflen = PAGE_SIZE; |
319 | dd->buflen &= ~(DES_BLOCK_SIZE - 1); |
320 | |
321 | if (!dd->buf_in || !dd->buf_out) { |
322 | dev_dbg(dd->dev, "unable to alloc pages.\n" ); |
323 | goto err_alloc; |
324 | } |
325 | |
326 | /* MAP here */ |
327 | dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, |
328 | dd->buflen, DMA_TO_DEVICE); |
329 | err = dma_mapping_error(dev: dd->dev, dma_addr: dd->dma_addr_in); |
330 | if (err) { |
331 | dev_dbg(dd->dev, "dma %zd bytes error\n" , dd->buflen); |
332 | goto err_map_in; |
333 | } |
334 | |
335 | dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, |
336 | dd->buflen, DMA_FROM_DEVICE); |
337 | err = dma_mapping_error(dev: dd->dev, dma_addr: dd->dma_addr_out); |
338 | if (err) { |
339 | dev_dbg(dd->dev, "dma %zd bytes error\n" , dd->buflen); |
340 | goto err_map_out; |
341 | } |
342 | |
343 | return 0; |
344 | |
345 | err_map_out: |
346 | dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, |
347 | DMA_TO_DEVICE); |
348 | err_map_in: |
349 | err_alloc: |
350 | free_page((unsigned long)dd->buf_out); |
351 | free_page((unsigned long)dd->buf_in); |
352 | return err; |
353 | } |
354 | |
/* Undo atmel_tdes_buff_init(): unmap the bounce buffers, then free them. */
static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
364 | |
365 | static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd, |
366 | dma_addr_t dma_addr_in, |
367 | dma_addr_t dma_addr_out, int length) |
368 | { |
369 | int len32; |
370 | |
371 | dd->dma_size = length; |
372 | |
373 | if (!(dd->flags & TDES_FLAGS_FAST)) { |
374 | dma_sync_single_for_device(dev: dd->dev, addr: dma_addr_in, size: length, |
375 | dir: DMA_TO_DEVICE); |
376 | } |
377 | |
378 | len32 = DIV_ROUND_UP(length, sizeof(u32)); |
379 | |
380 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); |
381 | atmel_tdes_write(dd, TDES_TPR, value: dma_addr_in); |
382 | atmel_tdes_write(dd, TDES_TCR, value: len32); |
383 | atmel_tdes_write(dd, TDES_RPR, value: dma_addr_out); |
384 | atmel_tdes_write(dd, TDES_RCR, value: len32); |
385 | |
386 | /* Enable Interrupt */ |
387 | atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX); |
388 | |
389 | /* Start DMA transfer */ |
390 | atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN); |
391 | |
392 | return 0; |
393 | } |
394 | |
395 | static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd, |
396 | dma_addr_t dma_addr_in, |
397 | dma_addr_t dma_addr_out, int length) |
398 | { |
399 | struct scatterlist sg[2]; |
400 | struct dma_async_tx_descriptor *in_desc, *out_desc; |
401 | enum dma_slave_buswidth addr_width; |
402 | |
403 | dd->dma_size = length; |
404 | |
405 | if (!(dd->flags & TDES_FLAGS_FAST)) { |
406 | dma_sync_single_for_device(dev: dd->dev, addr: dma_addr_in, size: length, |
407 | dir: DMA_TO_DEVICE); |
408 | } |
409 | |
410 | addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
411 | |
412 | dd->dma_lch_in.dma_conf.dst_addr_width = addr_width; |
413 | dd->dma_lch_out.dma_conf.src_addr_width = addr_width; |
414 | |
415 | dmaengine_slave_config(chan: dd->dma_lch_in.chan, config: &dd->dma_lch_in.dma_conf); |
416 | dmaengine_slave_config(chan: dd->dma_lch_out.chan, config: &dd->dma_lch_out.dma_conf); |
417 | |
418 | dd->flags |= TDES_FLAGS_DMA; |
419 | |
420 | sg_init_table(&sg[0], 1); |
421 | sg_dma_address(&sg[0]) = dma_addr_in; |
422 | sg_dma_len(&sg[0]) = length; |
423 | |
424 | sg_init_table(&sg[1], 1); |
425 | sg_dma_address(&sg[1]) = dma_addr_out; |
426 | sg_dma_len(&sg[1]) = length; |
427 | |
428 | in_desc = dmaengine_prep_slave_sg(chan: dd->dma_lch_in.chan, sgl: &sg[0], |
429 | sg_len: 1, dir: DMA_MEM_TO_DEV, |
430 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
431 | if (!in_desc) |
432 | return -EINVAL; |
433 | |
434 | out_desc = dmaengine_prep_slave_sg(chan: dd->dma_lch_out.chan, sgl: &sg[1], |
435 | sg_len: 1, dir: DMA_DEV_TO_MEM, |
436 | flags: DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
437 | if (!out_desc) |
438 | return -EINVAL; |
439 | |
440 | out_desc->callback = atmel_tdes_dma_callback; |
441 | out_desc->callback_param = dd; |
442 | |
443 | dmaengine_submit(desc: out_desc); |
444 | dma_async_issue_pending(chan: dd->dma_lch_out.chan); |
445 | |
446 | dmaengine_submit(desc: in_desc); |
447 | dma_async_issue_pending(chan: dd->dma_lch_in.chan); |
448 | |
449 | return 0; |
450 | } |
451 | |
452 | static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd) |
453 | { |
454 | int err, fast = 0, in, out; |
455 | size_t count; |
456 | dma_addr_t addr_in, addr_out; |
457 | |
458 | if ((!dd->in_offset) && (!dd->out_offset)) { |
459 | /* check for alignment */ |
460 | in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) && |
461 | IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size); |
462 | out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) && |
463 | IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size); |
464 | fast = in && out; |
465 | |
466 | if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg)) |
467 | fast = 0; |
468 | } |
469 | |
470 | |
471 | if (fast) { |
472 | count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg)); |
473 | count = min_t(size_t, count, sg_dma_len(dd->out_sg)); |
474 | |
475 | err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); |
476 | if (!err) { |
477 | dev_dbg(dd->dev, "dma_map_sg() error\n" ); |
478 | return -EINVAL; |
479 | } |
480 | |
481 | err = dma_map_sg(dd->dev, dd->out_sg, 1, |
482 | DMA_FROM_DEVICE); |
483 | if (!err) { |
484 | dev_dbg(dd->dev, "dma_map_sg() error\n" ); |
485 | dma_unmap_sg(dd->dev, dd->in_sg, 1, |
486 | DMA_TO_DEVICE); |
487 | return -EINVAL; |
488 | } |
489 | |
490 | addr_in = sg_dma_address(dd->in_sg); |
491 | addr_out = sg_dma_address(dd->out_sg); |
492 | |
493 | dd->flags |= TDES_FLAGS_FAST; |
494 | |
495 | } else { |
496 | /* use cache buffers */ |
497 | count = atmel_tdes_sg_copy(sg: &dd->in_sg, offset: &dd->in_offset, |
498 | buf: dd->buf_in, buflen: dd->buflen, total: dd->total, out: 0); |
499 | |
500 | addr_in = dd->dma_addr_in; |
501 | addr_out = dd->dma_addr_out; |
502 | |
503 | dd->flags &= ~TDES_FLAGS_FAST; |
504 | } |
505 | |
506 | dd->total -= count; |
507 | |
508 | if (dd->caps.has_dma) |
509 | err = atmel_tdes_crypt_dma(dd, dma_addr_in: addr_in, dma_addr_out: addr_out, length: count); |
510 | else |
511 | err = atmel_tdes_crypt_pdc(dd, dma_addr_in: addr_in, dma_addr_out: addr_out, length: count); |
512 | |
513 | if (err && (dd->flags & TDES_FLAGS_FAST)) { |
514 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); |
515 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); |
516 | } |
517 | |
518 | return err; |
519 | } |
520 | |
521 | static void |
522 | atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd) |
523 | { |
524 | struct skcipher_request *req = dd->req; |
525 | struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req); |
526 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
527 | unsigned int ivsize = crypto_skcipher_ivsize(tfm: skcipher); |
528 | |
529 | if (req->cryptlen < ivsize) |
530 | return; |
531 | |
532 | if (rctx->mode & TDES_FLAGS_ENCRYPT) |
533 | scatterwalk_map_and_copy(buf: req->iv, sg: req->dst, |
534 | start: req->cryptlen - ivsize, nbytes: ivsize, out: 0); |
535 | else |
536 | memcpy(req->iv, rctx->lastc, ivsize); |
537 | |
538 | } |
539 | |
540 | static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) |
541 | { |
542 | struct skcipher_request *req = dd->req; |
543 | struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req); |
544 | |
545 | clk_disable_unprepare(clk: dd->iclk); |
546 | |
547 | dd->flags &= ~TDES_FLAGS_BUSY; |
548 | |
549 | if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB) |
550 | atmel_tdes_set_iv_as_last_ciphertext_block(dd); |
551 | |
552 | skcipher_request_complete(req, err); |
553 | } |
554 | |
555 | static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, |
556 | struct skcipher_request *req) |
557 | { |
558 | struct crypto_async_request *async_req, *backlog; |
559 | struct atmel_tdes_ctx *ctx; |
560 | struct atmel_tdes_reqctx *rctx; |
561 | unsigned long flags; |
562 | int err, ret = 0; |
563 | |
564 | spin_lock_irqsave(&dd->lock, flags); |
565 | if (req) |
566 | ret = crypto_enqueue_request(queue: &dd->queue, request: &req->base); |
567 | if (dd->flags & TDES_FLAGS_BUSY) { |
568 | spin_unlock_irqrestore(lock: &dd->lock, flags); |
569 | return ret; |
570 | } |
571 | backlog = crypto_get_backlog(queue: &dd->queue); |
572 | async_req = crypto_dequeue_request(queue: &dd->queue); |
573 | if (async_req) |
574 | dd->flags |= TDES_FLAGS_BUSY; |
575 | spin_unlock_irqrestore(lock: &dd->lock, flags); |
576 | |
577 | if (!async_req) |
578 | return ret; |
579 | |
580 | if (backlog) |
581 | crypto_request_complete(req: backlog, err: -EINPROGRESS); |
582 | |
583 | req = skcipher_request_cast(req: async_req); |
584 | |
585 | /* assign new request to device */ |
586 | dd->req = req; |
587 | dd->total = req->cryptlen; |
588 | dd->in_offset = 0; |
589 | dd->in_sg = req->src; |
590 | dd->out_offset = 0; |
591 | dd->out_sg = req->dst; |
592 | |
593 | rctx = skcipher_request_ctx(req); |
594 | ctx = crypto_skcipher_ctx(tfm: crypto_skcipher_reqtfm(req)); |
595 | rctx->mode &= TDES_FLAGS_MODE_MASK; |
596 | dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode; |
597 | dd->ctx = ctx; |
598 | |
599 | err = atmel_tdes_write_ctrl(dd); |
600 | if (!err) |
601 | err = atmel_tdes_crypt_start(dd); |
602 | if (err) { |
603 | /* des_task will not finish it, so do it here */ |
604 | atmel_tdes_finish_req(dd, err); |
605 | tasklet_schedule(t: &dd->queue_task); |
606 | } |
607 | |
608 | return ret; |
609 | } |
610 | |
611 | static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd) |
612 | { |
613 | int err = -EINVAL; |
614 | size_t count; |
615 | |
616 | if (dd->flags & TDES_FLAGS_DMA) { |
617 | err = 0; |
618 | if (dd->flags & TDES_FLAGS_FAST) { |
619 | dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); |
620 | dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); |
621 | } else { |
622 | dma_sync_single_for_device(dev: dd->dev, addr: dd->dma_addr_out, |
623 | size: dd->dma_size, dir: DMA_FROM_DEVICE); |
624 | |
625 | /* copy data */ |
626 | count = atmel_tdes_sg_copy(sg: &dd->out_sg, offset: &dd->out_offset, |
627 | buf: dd->buf_out, buflen: dd->buflen, total: dd->dma_size, out: 1); |
628 | if (count != dd->dma_size) { |
629 | err = -EINVAL; |
630 | dev_dbg(dd->dev, "not all data converted: %zu\n" , count); |
631 | } |
632 | } |
633 | } |
634 | return err; |
635 | } |
636 | |
637 | static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode) |
638 | { |
639 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
640 | struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm: skcipher); |
641 | struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req); |
642 | struct device *dev = ctx->dd->dev; |
643 | |
644 | if (!req->cryptlen) |
645 | return 0; |
646 | |
647 | if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) { |
648 | dev_dbg(dev, "request size is not exact amount of DES blocks\n" ); |
649 | return -EINVAL; |
650 | } |
651 | ctx->block_size = DES_BLOCK_SIZE; |
652 | |
653 | rctx->mode = mode; |
654 | |
655 | if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB && |
656 | !(mode & TDES_FLAGS_ENCRYPT)) { |
657 | unsigned int ivsize = crypto_skcipher_ivsize(tfm: skcipher); |
658 | |
659 | if (req->cryptlen >= ivsize) |
660 | scatterwalk_map_and_copy(buf: rctx->lastc, sg: req->src, |
661 | start: req->cryptlen - ivsize, |
662 | nbytes: ivsize, out: 0); |
663 | } |
664 | |
665 | return atmel_tdes_handle_queue(dd: ctx->dd, req); |
666 | } |
667 | |
668 | static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd) |
669 | { |
670 | int ret; |
671 | |
672 | /* Try to grab 2 DMA channels */ |
673 | dd->dma_lch_in.chan = dma_request_chan(dev: dd->dev, name: "tx" ); |
674 | if (IS_ERR(ptr: dd->dma_lch_in.chan)) { |
675 | ret = PTR_ERR(ptr: dd->dma_lch_in.chan); |
676 | goto err_dma_in; |
677 | } |
678 | |
679 | dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + |
680 | TDES_IDATA1R; |
681 | dd->dma_lch_in.dma_conf.src_maxburst = 1; |
682 | dd->dma_lch_in.dma_conf.src_addr_width = |
683 | DMA_SLAVE_BUSWIDTH_4_BYTES; |
684 | dd->dma_lch_in.dma_conf.dst_maxburst = 1; |
685 | dd->dma_lch_in.dma_conf.dst_addr_width = |
686 | DMA_SLAVE_BUSWIDTH_4_BYTES; |
687 | dd->dma_lch_in.dma_conf.device_fc = false; |
688 | |
689 | dd->dma_lch_out.chan = dma_request_chan(dev: dd->dev, name: "rx" ); |
690 | if (IS_ERR(ptr: dd->dma_lch_out.chan)) { |
691 | ret = PTR_ERR(ptr: dd->dma_lch_out.chan); |
692 | goto err_dma_out; |
693 | } |
694 | |
695 | dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + |
696 | TDES_ODATA1R; |
697 | dd->dma_lch_out.dma_conf.src_maxburst = 1; |
698 | dd->dma_lch_out.dma_conf.src_addr_width = |
699 | DMA_SLAVE_BUSWIDTH_4_BYTES; |
700 | dd->dma_lch_out.dma_conf.dst_maxburst = 1; |
701 | dd->dma_lch_out.dma_conf.dst_addr_width = |
702 | DMA_SLAVE_BUSWIDTH_4_BYTES; |
703 | dd->dma_lch_out.dma_conf.device_fc = false; |
704 | |
705 | return 0; |
706 | |
707 | err_dma_out: |
708 | dma_release_channel(chan: dd->dma_lch_in.chan); |
709 | err_dma_in: |
710 | dev_err(dd->dev, "no DMA channel available\n" ); |
711 | return ret; |
712 | } |
713 | |
714 | static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) |
715 | { |
716 | dma_release_channel(chan: dd->dma_lch_in.chan); |
717 | dma_release_channel(chan: dd->dma_lch_out.chan); |
718 | } |
719 | |
720 | static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key, |
721 | unsigned int keylen) |
722 | { |
723 | struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm); |
724 | int err; |
725 | |
726 | err = verify_skcipher_des_key(tfm, key); |
727 | if (err) |
728 | return err; |
729 | |
730 | memcpy(ctx->key, key, keylen); |
731 | ctx->keylen = keylen; |
732 | |
733 | return 0; |
734 | } |
735 | |
736 | static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key, |
737 | unsigned int keylen) |
738 | { |
739 | struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm); |
740 | int err; |
741 | |
742 | err = verify_skcipher_des3_key(tfm, key); |
743 | if (err) |
744 | return err; |
745 | |
746 | memcpy(ctx->key, key, keylen); |
747 | ctx->keylen = keylen; |
748 | |
749 | return 0; |
750 | } |
751 | |
752 | static int atmel_tdes_ecb_encrypt(struct skcipher_request *req) |
753 | { |
754 | return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT); |
755 | } |
756 | |
757 | static int atmel_tdes_ecb_decrypt(struct skcipher_request *req) |
758 | { |
759 | return atmel_tdes_crypt(req, TDES_FLAGS_ECB); |
760 | } |
761 | |
762 | static int atmel_tdes_cbc_encrypt(struct skcipher_request *req) |
763 | { |
764 | return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT); |
765 | } |
766 | |
767 | static int atmel_tdes_cbc_decrypt(struct skcipher_request *req) |
768 | { |
769 | return atmel_tdes_crypt(req, TDES_FLAGS_CBC); |
770 | } |
771 | |
772 | static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm) |
773 | { |
774 | struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm); |
775 | |
776 | ctx->dd = atmel_tdes_dev_alloc(); |
777 | if (!ctx->dd) |
778 | return -ENODEV; |
779 | |
780 | crypto_skcipher_set_reqsize(skcipher: tfm, reqsize: sizeof(struct atmel_tdes_reqctx)); |
781 | |
782 | return 0; |
783 | } |
784 | |
785 | static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg) |
786 | { |
787 | alg->base.cra_priority = ATMEL_TDES_PRIORITY; |
788 | alg->base.cra_flags = CRYPTO_ALG_ASYNC; |
789 | alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx); |
790 | alg->base.cra_module = THIS_MODULE; |
791 | |
792 | alg->init = atmel_tdes_init_tfm; |
793 | } |
794 | |
/*
 * Algorithms exposed by this driver. Common fields (priority, flags,
 * ctxsize, module, init) are filled in by atmel_tdes_skcipher_alg_init()
 * at registration time.
 */
static struct skcipher_alg tdes_algs[] = {
/* Single DES, ECB mode. */
{
	.base.cra_name = "ecb(des)" ,
	.base.cra_driver_name = "atmel-ecb-des" ,
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_ecb_encrypt,
	.decrypt = atmel_tdes_ecb_decrypt,
},
/* Single DES, CBC mode (carries an IV). */
{
	.base.cra_name = "cbc(des)" ,
	.base.cra_driver_name = "atmel-cbc-des" ,
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = atmel_des_setkey,
	.encrypt = atmel_tdes_cbc_encrypt,
	.decrypt = atmel_tdes_cbc_decrypt,
},
/* Triple DES (EDE), ECB mode. */
{
	.base.cra_name = "ecb(des3_ede)" ,
	.base.cra_driver_name = "atmel-ecb-tdes" ,
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.setkey = atmel_tdes_setkey,
	.encrypt = atmel_tdes_ecb_encrypt,
	.decrypt = atmel_tdes_ecb_decrypt,
},
/* Triple DES (EDE), CBC mode (carries an IV). */
{
	.base.cra_name = "cbc(des3_ede)" ,
	.base.cra_driver_name = "atmel-cbc-tdes" ,
	.base.cra_blocksize = DES_BLOCK_SIZE,
	.base.cra_alignmask = 0x7,

	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.setkey = atmel_tdes_setkey,
	.encrypt = atmel_tdes_cbc_encrypt,
	.decrypt = atmel_tdes_cbc_decrypt,
	.ivsize = DES_BLOCK_SIZE,
},
};
847 | |
/* Tasklet: pump the request queue after a request finished or failed. */
static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}
854 | |
/*
 * Tasklet run when a transfer completes: tear down the finished chunk,
 * then either start the next chunk of the same request or complete it and
 * pump the queue.
 */
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			/* Fast path processed whole entries: step to the next. */
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}
881 | |
882 | static irqreturn_t atmel_tdes_irq(int irq, void *dev_id) |
883 | { |
884 | struct atmel_tdes_dev *tdes_dd = dev_id; |
885 | u32 reg; |
886 | |
887 | reg = atmel_tdes_read(dd: tdes_dd, TDES_ISR); |
888 | if (reg & atmel_tdes_read(dd: tdes_dd, TDES_IMR)) { |
889 | atmel_tdes_write(dd: tdes_dd, TDES_IDR, value: reg); |
890 | if (TDES_FLAGS_BUSY & tdes_dd->flags) |
891 | tasklet_schedule(t: &tdes_dd->done_task); |
892 | else |
893 | dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n" ); |
894 | return IRQ_HANDLED; |
895 | } |
896 | |
897 | return IRQ_NONE; |
898 | } |
899 | |
900 | static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd) |
901 | { |
902 | int i; |
903 | |
904 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) |
905 | crypto_unregister_skcipher(alg: &tdes_algs[i]); |
906 | } |
907 | |
908 | static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd) |
909 | { |
910 | int err, i, j; |
911 | |
912 | for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) { |
913 | atmel_tdes_skcipher_alg_init(alg: &tdes_algs[i]); |
914 | |
915 | err = crypto_register_skcipher(alg: &tdes_algs[i]); |
916 | if (err) |
917 | goto err_tdes_algs; |
918 | } |
919 | |
920 | return 0; |
921 | |
922 | err_tdes_algs: |
923 | for (j = 0; j < i; j++) |
924 | crypto_unregister_skcipher(alg: &tdes_algs[j]); |
925 | |
926 | return err; |
927 | } |
928 | |
929 | static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd) |
930 | { |
931 | |
932 | dd->caps.has_dma = 0; |
933 | |
934 | /* keep only major version number */ |
935 | switch (dd->hw_version & 0xf00) { |
936 | case 0x800: |
937 | case 0x700: |
938 | dd->caps.has_dma = 1; |
939 | break; |
940 | case 0x600: |
941 | break; |
942 | default: |
943 | dev_warn(dd->dev, |
944 | "Unmanaged tdes version, set minimum capabilities\n" ); |
945 | break; |
946 | } |
947 | } |
948 | |
/* Device-tree match table; a single compatible covers all supported SoCs. */
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
954 | |
955 | static int atmel_tdes_probe(struct platform_device *pdev) |
956 | { |
957 | struct atmel_tdes_dev *tdes_dd; |
958 | struct device *dev = &pdev->dev; |
959 | struct resource *tdes_res; |
960 | int err; |
961 | |
962 | tdes_dd = devm_kmalloc(dev: &pdev->dev, size: sizeof(*tdes_dd), GFP_KERNEL); |
963 | if (!tdes_dd) |
964 | return -ENOMEM; |
965 | |
966 | tdes_dd->dev = dev; |
967 | |
968 | platform_set_drvdata(pdev, data: tdes_dd); |
969 | |
970 | INIT_LIST_HEAD(list: &tdes_dd->list); |
971 | spin_lock_init(&tdes_dd->lock); |
972 | |
973 | tasklet_init(t: &tdes_dd->done_task, func: atmel_tdes_done_task, |
974 | data: (unsigned long)tdes_dd); |
975 | tasklet_init(t: &tdes_dd->queue_task, func: atmel_tdes_queue_task, |
976 | data: (unsigned long)tdes_dd); |
977 | |
978 | crypto_init_queue(queue: &tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); |
979 | |
980 | tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, index: 0, res: &tdes_res); |
981 | if (IS_ERR(ptr: tdes_dd->io_base)) { |
982 | err = PTR_ERR(ptr: tdes_dd->io_base); |
983 | goto err_tasklet_kill; |
984 | } |
985 | tdes_dd->phys_base = tdes_res->start; |
986 | |
987 | /* Get the IRQ */ |
988 | tdes_dd->irq = platform_get_irq(pdev, 0); |
989 | if (tdes_dd->irq < 0) { |
990 | err = tdes_dd->irq; |
991 | goto err_tasklet_kill; |
992 | } |
993 | |
994 | err = devm_request_irq(dev: &pdev->dev, irq: tdes_dd->irq, handler: atmel_tdes_irq, |
995 | IRQF_SHARED, devname: "atmel-tdes" , dev_id: tdes_dd); |
996 | if (err) { |
997 | dev_err(dev, "unable to request tdes irq.\n" ); |
998 | goto err_tasklet_kill; |
999 | } |
1000 | |
1001 | /* Initializing the clock */ |
1002 | tdes_dd->iclk = devm_clk_get(dev: &pdev->dev, id: "tdes_clk" ); |
1003 | if (IS_ERR(ptr: tdes_dd->iclk)) { |
1004 | dev_err(dev, "clock initialization failed.\n" ); |
1005 | err = PTR_ERR(ptr: tdes_dd->iclk); |
1006 | goto err_tasklet_kill; |
1007 | } |
1008 | |
1009 | err = atmel_tdes_hw_version_init(dd: tdes_dd); |
1010 | if (err) |
1011 | goto err_tasklet_kill; |
1012 | |
1013 | atmel_tdes_get_cap(dd: tdes_dd); |
1014 | |
1015 | err = atmel_tdes_buff_init(dd: tdes_dd); |
1016 | if (err) |
1017 | goto err_tasklet_kill; |
1018 | |
1019 | if (tdes_dd->caps.has_dma) { |
1020 | err = atmel_tdes_dma_init(dd: tdes_dd); |
1021 | if (err) |
1022 | goto err_buff_cleanup; |
1023 | |
1024 | dev_info(dev, "using %s, %s for DMA transfers\n" , |
1025 | dma_chan_name(tdes_dd->dma_lch_in.chan), |
1026 | dma_chan_name(tdes_dd->dma_lch_out.chan)); |
1027 | } |
1028 | |
1029 | spin_lock(lock: &atmel_tdes.lock); |
1030 | list_add_tail(new: &tdes_dd->list, head: &atmel_tdes.dev_list); |
1031 | spin_unlock(lock: &atmel_tdes.lock); |
1032 | |
1033 | err = atmel_tdes_register_algs(dd: tdes_dd); |
1034 | if (err) |
1035 | goto err_algs; |
1036 | |
1037 | dev_info(dev, "Atmel DES/TDES\n" ); |
1038 | |
1039 | return 0; |
1040 | |
1041 | err_algs: |
1042 | spin_lock(lock: &atmel_tdes.lock); |
1043 | list_del(entry: &tdes_dd->list); |
1044 | spin_unlock(lock: &atmel_tdes.lock); |
1045 | if (tdes_dd->caps.has_dma) |
1046 | atmel_tdes_dma_cleanup(dd: tdes_dd); |
1047 | err_buff_cleanup: |
1048 | atmel_tdes_buff_cleanup(dd: tdes_dd); |
1049 | err_tasklet_kill: |
1050 | tasklet_kill(t: &tdes_dd->done_task); |
1051 | tasklet_kill(t: &tdes_dd->queue_task); |
1052 | |
1053 | return err; |
1054 | } |
1055 | |
1056 | static void atmel_tdes_remove(struct platform_device *pdev) |
1057 | { |
1058 | struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev); |
1059 | |
1060 | spin_lock(lock: &atmel_tdes.lock); |
1061 | list_del(entry: &tdes_dd->list); |
1062 | spin_unlock(lock: &atmel_tdes.lock); |
1063 | |
1064 | atmel_tdes_unregister_algs(dd: tdes_dd); |
1065 | |
1066 | tasklet_kill(t: &tdes_dd->done_task); |
1067 | tasklet_kill(t: &tdes_dd->queue_task); |
1068 | |
1069 | if (tdes_dd->caps.has_dma) |
1070 | atmel_tdes_dma_cleanup(dd: tdes_dd); |
1071 | |
1072 | atmel_tdes_buff_cleanup(dd: tdes_dd); |
1073 | } |
1074 | |
/* Platform driver glue; bound via DT (atmel_tdes_dt_ids) or name match. */
static struct platform_driver atmel_tdes_driver = {
	.probe = atmel_tdes_probe,
	.remove_new = atmel_tdes_remove,
	.driver = {
		.name = "atmel_tdes" ,
		.of_match_table = atmel_tdes_dt_ids,
	},
};
1083 | |
1084 | module_platform_driver(atmel_tdes_driver); |
1085 | |
1086 | MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support." ); |
1087 | MODULE_LICENSE("GPL v2" ); |
1088 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique" ); |
1089 | |