// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

struct mv_cesa_dev *cesa_dev;

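/*
 * Pop the next pending request off the engine queue. Must be called with
 * the engine lock held. *backlog is set to the backlogged request (if any)
 * that can now be notified that it is back in progress.
 */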
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	return req;
}

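/*
 * If the engine is idle, dequeue the next pending request, make it the
 * in-flight request and launch its first processing step.
 */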
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}

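/*
 * Standard (non-TDMA) processing path: hand the interrupt status to the
 * request handler. A zero result means the request is done and can be moved
 * to the complete queue; -EINPROGRESS means another step must be launched.
 */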
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}

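/*
 * Dispatch interrupt processing: take the TDMA path when a descriptor chain
 * is pending, the standard path otherwise.
 */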
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}

static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	crypto_request_complete(req, res);
	local_bh_enable();
}

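/*
 * Interrupt handler: acknowledge and process pending interrupts, complete or
 * restart the current request, launch the next pending one and drain the
 * complete queue.
 */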
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}

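/*
 * Enqueue a request on the engine it has been assigned to. DMA requests are
 * chained to the engine's TDMA descriptor list under the engine lock, and
 * the engine is rearmed in case it was idle.
 */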
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}

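/*
 * Register all skcipher and ahash algorithms supported by this CESA variant,
 * unwinding every registration done so far on failure.
 */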
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

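/*
 * Program the TDMA address decoding windows: clear all four windows, then
 * open one window per DRAM chip select so the engine can reach DRAM through
 * the mbus.
 */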
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}

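/*
 * Create the managed DMA pools (TDMA descriptors, operation contexts, hash
 * cache and padding buffers) used by the TDMA path. A no-op on platforms
 * without a TDMA engine.
 */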
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}

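/*
 * Acquire the SRAM attached to an engine, either from a genalloc pool
 * referenced by the "marvell,crypto-srams" property, or by remapping and
 * DMA-mapping the "sram"/"sramN" memory resource.
 */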
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram_pool = gen_pool_dma_alloc(engine->pool,
						       cesa->sram_size,
						       &engine->sram_dma);
		if (engine->sram_pool)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}

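/* Release the SRAM acquired by mv_cesa_get_sram(). */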
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}

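/*
 * Probe: allocate the device, map registers, then grab the SRAM, IRQ and
 * clocks of each engine, program the mbus windows when TDMA is available and
 * register the supported algorithms. Only one CESA device is supported.
 */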
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	int irq, ret, i, cpu;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[16];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		engine->irq = irq;

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%u", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%u", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		/* Set affinity */
		cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
		irq_set_affinity_hint(irq, get_cpu_mask(cpu));

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
		if (cesa->engines[i].irq > 0)
			irq_set_affinity_hint(cesa->engines[i].irq, NULL);
	}

	return ret;
}

static void mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
		irq_set_affinity_hint(cesa->engines[i].irq, NULL);
	}
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove_new = mv_cesa_remove,
	.id_table = mv_cesa_plat_id_table,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");