// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/slab.h>

#include "sf-pdma.h"

#define PDMA_QUIRK_NO_STRICT_ORDERING	BIT(0)

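/*
 * Fallback 64-bit MMIO accessors for configurations that do not provide
 * readq()/writeq(): each 64-bit access is split into two 32-bit accesses,
 * low word first.
 */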
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(lower_32_bits(v), addr);
	writel(upper_32_bits(v), addr + 4);
}
#endif

static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}

static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sf_pdma_desc, vdesc);
}

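/*
 * Allocate a zeroed descriptor and bind it to its channel; GFP_NOWAIT makes
 * this safe to call from atomic context.
 */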
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->chan = chan;

	return desc;
}

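/* Fill in the transfer type, size, and source/destination addresses. */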
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
			      u64 dst, u64 src, u64 size)
{
	desc->xfer_type = desc->chan->pdma->transfer_type;
	desc->xfer_size = size;
	desc->dst_addr = dst;
	desc->src_addr = src;
}

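/* Release the hardware channel by clearing its control register. */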
static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(PDMA_CLEAR_CTRL, regs->ctrl);
}

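/* Prepare a single memory-to-memory transfer descriptor. */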
static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct sf_pdma_desc *desc;
	unsigned long iflags;

	if (chan && (!len || !dest || !src)) {
		dev_err(chan->pdma->dma_dev.dev,
			"Please check dma len, dest, src!\n");
		return NULL;
	}

	desc = sf_pdma_alloc_desc(chan);
	if (!desc)
		return NULL;

	desc->dirn = DMA_MEM_TO_MEM;
	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

	spin_lock_irqsave(&chan->vchan.lock, iflags);
	sf_pdma_fill_desc(desc, dest, src, len);
	spin_unlock_irqrestore(&chan->vchan.lock, iflags);

	return desc->async_tx;
}

static int sf_pdma_slave_config(struct dma_chan *dchan,
				struct dma_slave_config *cfg)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

	memcpy(&chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	struct pdma_regs *regs = &chan->regs;

	dma_cookie_init(dchan);
	writel(PDMA_CLAIM_MASK, regs->ctrl);

	return 0;
}

static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;

	writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}

static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	sf_pdma_disclaim_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);
}

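/*
 * Look up the descriptor for @cookie and report how many bytes are still
 * outstanding: the hardware residue register for the in-flight descriptor,
 * or the full transfer size for a descriptor that has not started yet.
 */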
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
				   dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pdma_regs *regs = &chan->regs;
	unsigned long flags;
	u64 residue = 0;
	struct sf_pdma_desc *desc;
	struct dma_async_tx_descriptor *tx = NULL;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
		if (vd->tx.cookie == cookie)
			tx = &vd->tx;

	if (!tx)
		goto out;

	if (cookie == tx->chan->completed_cookie)
		goto out;

	if (cookie == tx->cookie) {
		residue = readq(regs->residue);
	} else {
		vd = vchan_find_desc(&chan->vchan, cookie);
		if (!vd)
			goto out;

		desc = to_sf_pdma_desc(vd);
		residue = desc->xfer_size;
	}

out:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	return residue;
}

static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
		  dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	enum dma_status status;

	status = dma_cookie_status(dchan, cookie, txstate);

	if (txstate && status != DMA_ERROR)
		dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

	return status;
}

static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	sf_pdma_disable_request(chan);
	kfree(chan->desc);
	chan->desc = NULL;
	chan->xfer_err = false;
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

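/* Claim the channel, enable DONE/ERR interrupts, and set the run bit to start the transfer. */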
static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
	struct pdma_regs *regs = &chan->regs;
	u32 v;

	v = PDMA_CLAIM_MASK |
		PDMA_ENABLE_DONE_INT_MASK |
		PDMA_ENABLE_ERR_INT_MASK |
		PDMA_RUN_MASK;

	writel(v, regs->ctrl);
}

static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
{
	struct virt_dma_chan *vchan = &chan->vchan;
	struct virt_dma_desc *vdesc;

	if (list_empty(&vchan->desc_issued))
		return NULL;

	vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);

	return container_of(vdesc, struct sf_pdma_desc, vdesc);
}

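/* Program the current descriptor into the channel registers and start it. */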
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
	struct sf_pdma_desc *desc = chan->desc;
	struct pdma_regs *regs = &chan->regs;

	if (!desc) {
		dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
		return;
	}

	writel(desc->xfer_type, regs->xfer_type);
	writeq(desc->xfer_size, regs->xfer_size);
	writeq(desc->dst_addr, regs->dst_addr);
	writeq(desc->src_addr, regs->src_addr);

	chan->desc = desc;
	chan->status = DMA_IN_PROGRESS;
	sf_pdma_enable_request(chan);
}

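/* Start the first issued descriptor if the channel is currently idle. */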
static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
	struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
		/* vchan_issue_pending() already checked that the issued list is not empty */
		chan->desc = sf_pdma_get_first_pending_desc(chan);
		sf_pdma_xfer_desc(chan);
	}

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct sf_pdma_desc *desc;

	desc = to_sf_pdma_desc(vdesc);
	kfree(desc);
}

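/*
 * Completion bottom half: reset the retry state after a recovered error,
 * complete the finished descriptor, and start the next pending one, if any.
 */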
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->xfer_err) {
		chan->retries = MAX_RETRY;
		chan->status = DMA_COMPLETE;
		chan->xfer_err = false;
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	list_del(&chan->desc->vdesc.node);
	vchan_cookie_complete(&chan->desc->vdesc);

	chan->desc = sf_pdma_get_first_pending_desc(chan);
	if (chan->desc)
		sf_pdma_xfer_desc(chan);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

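/*
 * Error bottom half: retry the transfer while retries remain, otherwise
 * invoke the descriptor callback to report the failure.
 */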
static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
	struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
	struct sf_pdma_desc *desc = chan->desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->retries <= 0) {
		/* fail to recover */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
	} else {
		/* retry */
		chan->retries--;
		chan->xfer_err = true;
		chan->status = DMA_ERROR;

		sf_pdma_enable_request(chan);
		spin_unlock_irqrestore(&chan->lock, flags);
	}
}

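/*
 * DONE interrupt: acknowledge the interrupt, then either schedule the
 * completion tasklet (residue == 0) or resubmit the remaining bytes of the
 * current descriptor.
 */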
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;
	u64 residue;

	spin_lock(&chan->vchan.lock);
	writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
	residue = readq(regs->residue);

	if (!residue) {
		tasklet_hi_schedule(&chan->done_tasklet);
	} else {
		/* submit the next transaction if possible */
		struct sf_pdma_desc *desc = chan->desc;

		desc->src_addr += desc->xfer_size - residue;
		desc->dst_addr += desc->xfer_size - residue;
		desc->xfer_size = residue;

		sf_pdma_xfer_desc(chan);
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

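/* ERROR interrupt: acknowledge the interrupt and defer handling to the error tasklet. */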
static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
	struct sf_pdma_chan *chan = dev_id;
	struct pdma_regs *regs = &chan->regs;

	spin_lock(&chan->lock);
	writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
	spin_unlock(&chan->lock);

	tasklet_schedule(&chan->err_tasklet);

	return IRQ_HANDLED;
}

/**
 * sf_pdma_irq_init() - Init PDMA IRQ Handlers
 * @pdev: pointer to the platform_device
 * @pdma: pointer to the PDMA engine. Caller must check for NULL
 *
 * Initialize the DONE and ERROR interrupt handlers for each channel. The
 * caller must make sure the pointers passed in are non-NULL. This function
 * should be called only once, during device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0       - all IRQ handlers initialized successfully
 * * -EINVAL - failed to get or request an IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
	int irq, r, i;
	struct sf_pdma_chan *chan;

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		irq = platform_get_irq(pdev, i * 2);
		if (irq < 0)
			return -EINVAL;

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
			return -EINVAL;
		}

		chan->txirq = irq;

		irq = platform_get_irq(pdev, (i * 2) + 1);
		if (irq < 0)
			return -EINVAL;

		r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
				     dev_name(&pdev->dev), (void *)chan);
		if (r) {
			dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
			return -EINVAL;
		}

		chan->errirq = irq;
	}

	return 0;
}

/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer to the PDMA engine. Caller must check for NULL
 *
 * Initialize the per-channel data structures and register bases. The caller
 * must make sure the pointer passed in is non-NULL. This function should be
 * called only once, during device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
	int i;
	struct sf_pdma_chan *chan;

	INIT_LIST_HEAD(&pdma->dma_dev.channels);

	for (i = 0; i < pdma->n_chans; i++) {
		chan = &pdma->chans[i];

		chan->regs.ctrl =
			SF_PDMA_REG_BASE(i) + PDMA_CTRL;
		chan->regs.xfer_type =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
		chan->regs.xfer_size =
			SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
		chan->regs.dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
		chan->regs.src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
		chan->regs.act_type =
			SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
		chan->regs.residue =
			SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
		chan->regs.cur_dst_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
		chan->regs.cur_src_addr =
			SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

		chan->pdma = pdma;
		chan->pm_state = RUNNING;
		chan->slave_id = i;
		chan->xfer_err = false;
		spin_lock_init(&chan->lock);

		chan->vchan.desc_free = sf_pdma_free_desc;
		vchan_init(&chan->vchan, &pdma->dma_dev);

		writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

		tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
		tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
	}
}

static int sf_pdma_probe(struct platform_device *pdev)
{
	const struct sf_pdma_driver_platdata *ddata;
	struct sf_pdma *pdma;
	int ret, n_chans;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
		DMA_SLAVE_BUSWIDTH_64_BYTES;

	ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans);
	if (ret) {
		/* backwards-compatibility for no dma-channels property */
		dev_dbg(&pdev->dev, "set number of channels to default value: 4\n");
		n_chans = PDMA_MAX_NR_CH;
	} else if (n_chans > PDMA_MAX_NR_CH) {
		dev_err(&pdev->dev, "the number of channels exceeds the maximum\n");
		return -EINVAL;
	}

	pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans),
			    GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->n_chans = n_chans;

	pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;

	ddata = device_get_match_data(&pdev->dev);
	if (ddata) {
		if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
			pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
	}

	pdma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pdma->membase))
		return PTR_ERR(pdma->membase);

	ret = sf_pdma_irq_init(pdev, pdma);
	if (ret)
		return ret;

	sf_pdma_setup_chans(pdma);

	pdma->dma_dev.dev = &pdev->dev;

	/* Setup capability */
	dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
	pdma->dma_dev.copy_align = 2;
	pdma->dma_dev.src_addr_widths = widths;
	pdma->dma_dev.dst_addr_widths = widths;
	pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
	pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdma->dma_dev.descriptor_reuse = true;

	/* Setup DMA APIs */
	pdma->dma_dev.device_alloc_chan_resources =
		sf_pdma_alloc_chan_resources;
	pdma->dma_dev.device_free_chan_resources =
		sf_pdma_free_chan_resources;
	pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
	pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
	pdma->dma_dev.device_config = sf_pdma_slave_config;
	pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
	pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;

	platform_set_drvdata(pdev, pdma);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(&pdev->dev,
			 "Failed to set DMA mask. Fall back to default.\n");

	ret = dma_async_device_register(&pdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register SiFive Platform DMA. (%d)\n", ret);
		return ret;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, pdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Can't register SiFive Platform OF_DMA. (%d)\n", ret);
		goto err_unregister;
	}

	return 0;

err_unregister:
	dma_async_device_unregister(&pdma->dma_dev);

	return ret;
}

static void sf_pdma_remove(struct platform_device *pdev)
{
	struct sf_pdma *pdma = platform_get_drvdata(pdev);
	struct sf_pdma_chan *ch;
	int i;

	for (i = 0; i < pdma->n_chans; i++) {
		ch = &pdma->chans[i];

		devm_free_irq(&pdev->dev, ch->txirq, ch);
		devm_free_irq(&pdev->dev, ch->errirq, ch);
		list_del(&ch->vchan.chan.device_node);
		tasklet_kill(&ch->vchan.task);
		tasklet_kill(&ch->done_tasklet);
		tasklet_kill(&ch->err_tasklet);
	}

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&pdma->dma_dev);
}

static const struct sf_pdma_driver_platdata mpfs_pdma = {
	.quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
};

static const struct of_device_id sf_pdma_dt_ids[] = {
	{
		.compatible = "sifive,fu540-c000-pdma",
	}, {
		.compatible = "sifive,pdma0",
	}, {
		.compatible = "microchip,mpfs-pdma",
		.data = &mpfs_pdma,
	},
	{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);

static struct platform_driver sf_pdma_driver = {
	.probe = sf_pdma_probe,
	.remove_new = sf_pdma_remove,
	.driver = {
		.name = "sf-pdma",
		.of_match_table = sf_pdma_dt_ids,
	},
};

static int __init sf_pdma_init(void)
{
	return platform_driver_register(&sf_pdma_driver);
}

static void __exit sf_pdma_exit(void)
{
	platform_driver_unregister(&sf_pdma_driver);
}

/* do early init */
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");