// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *    refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not have
 *    external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *    MPC512x), and 32 bytes are supported, and, consequently, source
 *    addresses and destination addresses must be aligned accordingly;
 *    furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *    (chunk size * maxburst)
 */
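
/*
 * Illustrative client usage (a hedged sketch, not code from this
 * driver): a slave driver would typically configure a channel like
 * this before preparing transfers. "fifo_paddr" is a hypothetical
 * peripheral FIFO address.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_paddr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * On MPC512x the total transfer length must then be a multiple of
 * chunk size * maxburst, i.e. 16 bytes in this example.
 */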

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
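
/*
 * The TSIZE values above encode log2 of the transfer size in bytes;
 * this is what buswidth_to_dmatsize() computes further below.
 */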

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
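
/*
 * Note: the bitfield layout above matches the hardware TCD as seen by
 * the big-endian PowerPC cores this driver targets; the structure is
 * copied to the controller verbatim (see memcpy_toio() in
 * mpc_dma_execute()) and is not meant to be endian-portable.
 */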

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	u8				swidth;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;
	u8				dwidth;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/*
 * Convert struct dma_chan to struct mpc_dma. This relies on the
 * channels[] array of the parent struct mpc_dma being indexed by
 * chan_id.
 */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met when calling mpc_dma_execute():
 *  a) mchan->lock is acquired,
 *  b) mchan->active list is empty,
 *  c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
					 struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			dmaengine_desc_get_callback_invoke(desc, NULL);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(struct tasklet_struct *t)
{
	struct mpc_dma *mdma = from_tasklet(mdma, t, tasklet);
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}

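/*
 * Convert a bus width in bytes to the TCD ssize/dsize encoding, i.e.
 * log2 of the width: buswidth_to_dmatsize(4) == MPC_DMA_TSIZE_4.
 */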
static inline u8 buswidth_to_dmatsize(u8 buswidth)
{
	u8 res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		if (list_empty(&mchan->free)) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		mdesc = list_first_entry(&mchan->free,
					 struct mpc_dma_desc, node);
		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
				goto err_prep;

			tcd->soff = 0;
			tcd->doff = mchan->dwidth;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
				goto err_prep;

			tcd->soff = mchan->swidth;
			tcd->doff = 0;
		}

		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

		if (mdma->is_mpc8308) {
			tcd->nbytes = sg_dma_len(sg);
			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
				goto err_prep;

			/* No major loops for MPC8308 */
			tcd->biter = 1;
			tcd->citer = 1;
		} else {
			len = sg_dma_len(sg);
			tcd->nbytes = tcd_nunits * tcd->ssize;
			if (!IS_ALIGNED(len, tcd->nbytes))
				goto err_prep;

			iter = len / tcd->nbytes;
			if (iter >= 1 << 15) {
				/* len is too big */
				goto err_prep;
			}
			/* citer_linkch contains the high bits of iter */
			tcd->biter = iter & 0x1ff;
			tcd->biter_linkch = iter >> 9;
			tcd->citer = tcd->biter;
			tcd->citer_linkch = tcd->biter_linkch;
		}

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}

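/*
 * Check whether a bus width is supported by the hardware; 16-byte
 * transfers are not available on MPC8308.
 */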
static inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		break;
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}

static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *    supported
	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *    are supported, and, consequently, source addresses and
	 *    destination addresses must be aligned accordingly; furthermore,
	 *    for MPC512x SoCs, the transfer size must be aligned on (chunk
	 *    size * maxburst)
	 *  - during the transfer, the RAM address is incremented by the size
	 *    of transfer chunk
	 *  - the peripheral port's address is constant during the transfer.
	 */
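	/*
	 * For example, with a 4 byte chunk size and a maxburst of 4 on
	 * MPC512x, each burst moves 16 bytes, so the transfer size must
	 * be a multiple of 16 bytes.
	 */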

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
		return -EINVAL;
	}

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (!mdma->irq) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (!mdma->irq2) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_setup(&mdma->tasklet, mpc_dma_tasklet);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					MPC_DMA_DMACR_ERGA |
					MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static void mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);
	tasklet_kill(&mdma->tasklet);
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc_dma_match);

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove_new	= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");