1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * SA11x0 DMAengine support |
4 | * |
5 | * Copyright (C) 2012 Russell King |
6 | * Derived in part from arch/arm/mach-sa1100/dma.c, |
7 | * Copyright (C) 2000, 2001 by Nicolas Pitre |
8 | */ |
9 | #include <linux/sched.h> |
10 | #include <linux/device.h> |
11 | #include <linux/dmaengine.h> |
12 | #include <linux/init.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/spinlock.h> |
19 | |
20 | #include "virt-dma.h" |
21 | |
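/*
 * DMA_ALIGN is used as an alignment mask: buffer addresses and split
 * lengths must be 4-byte aligned.  DMA_MAX_SIZE is the largest length
 * we program into a single buffer transfer register; oversize requests
 * are split into near-equal aligned chunks by the prep routines below.
 */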
22 | #define NR_PHY_CHAN 6 |
23 | #define DMA_ALIGN 3 |
24 | #define DMA_MAX_SIZE 0x1fff |
25 | #define DMA_CHUNK_SIZE 0x1000 |
26 | |
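/*
 * Per-channel register map.  Each physical channel occupies DMA_SIZE
 * bytes of register space and has two buffer descriptors, A and B
 * (DBSA/DBTA and DBSB/DBTB), which the driver ping-pongs between to
 * keep the channel fed with transfers.
 */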
27 | #define DMA_DDAR 0x00 |
28 | #define DMA_DCSR_S 0x04 |
29 | #define DMA_DCSR_C 0x08 |
30 | #define DMA_DCSR_R 0x0c |
31 | #define DMA_DBSA 0x10 |
32 | #define DMA_DBTA 0x14 |
33 | #define DMA_DBSB 0x18 |
34 | #define DMA_DBTB 0x1c |
35 | #define DMA_SIZE 0x20 |
36 | |
37 | #define DCSR_RUN (1 << 0) |
38 | #define DCSR_IE (1 << 1) |
39 | #define DCSR_ERROR (1 << 2) |
40 | #define DCSR_DONEA (1 << 3) |
41 | #define DCSR_STRTA (1 << 4) |
42 | #define DCSR_DONEB (1 << 5) |
43 | #define DCSR_STRTB (1 << 6) |
44 | #define DCSR_BIU (1 << 7) |
45 | |
46 | #define DDAR_RW (1 << 0) /* 0 = W, 1 = R */ |
47 | #define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */ |
48 | #define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */ |
49 | #define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */ |
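/* DDAR bits 7:4 select the device */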
50 | #define DDAR_Ser0UDCTr (0x0 << 4) |
51 | #define DDAR_Ser0UDCRc (0x1 << 4) |
52 | #define DDAR_Ser1SDLCTr (0x2 << 4) |
53 | #define DDAR_Ser1SDLCRc (0x3 << 4) |
54 | #define DDAR_Ser1UARTTr (0x4 << 4) |
55 | #define DDAR_Ser1UARTRc (0x5 << 4) |
56 | #define DDAR_Ser2ICPTr (0x6 << 4) |
57 | #define DDAR_Ser2ICPRc (0x7 << 4) |
58 | #define DDAR_Ser3UARTTr (0x8 << 4) |
59 | #define DDAR_Ser3UARTRc (0x9 << 4) |
60 | #define DDAR_Ser4MCP0Tr (0xa << 4) |
61 | #define DDAR_Ser4MCP0Rc (0xb << 4) |
62 | #define DDAR_Ser4MCP1Tr (0xc << 4) |
63 | #define DDAR_Ser4MCP1Rc (0xd << 4) |
64 | #define DDAR_Ser4SSPTr (0xe << 4) |
65 | #define DDAR_Ser4SSPRc (0xf << 4) |
66 | |
67 | struct sa11x0_dma_sg { |
68 | u32 addr; |
69 | u32 len; |
70 | }; |
71 | |
72 | struct sa11x0_dma_desc { |
73 | struct virt_dma_desc vd; |
74 | |
75 | u32 ddar; |
76 | size_t size; |
77 | unsigned period; |
78 | bool cyclic; |
79 | |
80 | unsigned sglen; |
81 | struct sa11x0_dma_sg sg[] __counted_by(sglen); |
82 | }; |
83 | |
84 | struct sa11x0_dma_phy; |
85 | |
86 | struct sa11x0_dma_chan { |
87 | struct virt_dma_chan vc; |
88 | |
89 | /* protected by c->vc.lock */ |
90 | struct sa11x0_dma_phy *phy; |
91 | enum dma_status status; |
92 | |
93 | /* protected by d->lock */ |
94 | struct list_head node; |
95 | |
96 | u32 ddar; |
97 | const char *name; |
98 | }; |
99 | |
100 | struct sa11x0_dma_phy { |
101 | void __iomem *base; |
102 | struct sa11x0_dma_dev *dev; |
103 | unsigned num; |
104 | |
105 | struct sa11x0_dma_chan *vchan; |
106 | |
107 | /* Protected by c->vc.lock */ |
108 | unsigned sg_load; |
109 | struct sa11x0_dma_desc *txd_load; |
110 | unsigned sg_done; |
111 | struct sa11x0_dma_desc *txd_done; |
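
	/* Register state saved on suspend and replayed on resume */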
112 | u32 dbs[2]; |
113 | u32 dbt[2]; |
114 | u32 dcsr; |
115 | }; |
116 | |
117 | struct sa11x0_dma_dev { |
118 | struct dma_device slave; |
119 | void __iomem *base; |
120 | spinlock_t lock; |
121 | struct tasklet_struct task; |
122 | struct list_head chan_pending; |
123 | struct sa11x0_dma_phy phy[NR_PHY_CHAN]; |
124 | }; |
125 | |
126 | static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) |
127 | { |
128 | return container_of(chan, struct sa11x0_dma_chan, vc.chan); |
129 | } |
130 | |
131 | static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) |
132 | { |
133 | return container_of(dmadev, struct sa11x0_dma_dev, slave); |
134 | } |
135 | |
136 | static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) |
137 | { |
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
139 | |
140 | return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL; |
141 | } |
142 | |
143 | static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) |
144 | { |
145 | kfree(container_of(vd, struct sa11x0_dma_desc, vd)); |
146 | } |
147 | |
148 | static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) |
149 | { |
	list_del(&txd->vd.node);
151 | p->txd_load = txd; |
152 | p->sg_load = 0; |
153 | |
	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
155 | p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); |
156 | } |
157 | |
158 | static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, |
159 | struct sa11x0_dma_chan *c) |
160 | { |
161 | struct sa11x0_dma_desc *txd = p->txd_load; |
162 | struct sa11x0_dma_sg *sg; |
163 | void __iomem *base = p->base; |
164 | unsigned dbsx, dbtx; |
165 | u32 dcsr; |
166 | |
167 | if (!txd) |
168 | return; |
169 | |
170 | dcsr = readl_relaxed(base + DMA_DCSR_R); |
171 | |
172 | /* Don't try to load the next transfer if both buffers are started */ |
173 | if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB)) |
174 | return; |
175 | |
176 | if (p->sg_load == txd->sglen) { |
177 | if (!txd->cyclic) { |
178 | struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); |
179 | |
180 | /* |
181 | * We have reached the end of the current descriptor. |
182 | * Peek at the next descriptor, and if compatible with |
183 | * the current, start processing it. |
184 | */ |
185 | if (txn && txn->ddar == txd->ddar) { |
186 | txd = txn; |
				sa11x0_dma_start_desc(p, txn);
188 | } else { |
189 | p->txd_load = NULL; |
190 | return; |
191 | } |
192 | } else { |
193 | /* Cyclic: reset back to beginning */ |
194 | p->sg_load = 0; |
195 | } |
196 | } |
197 | |
198 | sg = &txd->sg[p->sg_load++]; |
199 | |
	/*
	 * Select which buffer to load: use A when the hardware is busy
	 * with B (BIU and STRTB both set) or fully idle (neither BIU
	 * nor STRTA set); otherwise load B.
	 */
201 | if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) || |
202 | ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) { |
203 | dbsx = DMA_DBSA; |
204 | dbtx = DMA_DBTA; |
205 | dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN; |
206 | } else { |
207 | dbsx = DMA_DBSB; |
208 | dbtx = DMA_DBTB; |
209 | dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN; |
210 | } |
211 | |
212 | writel_relaxed(sg->addr, base + dbsx); |
213 | writel_relaxed(sg->len, base + dbtx); |
	writel(dcsr, base + DMA_DCSR_S);
215 | |
	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
217 | p->num, dcsr, |
218 | 'A' + (dbsx == DMA_DBSB), sg->addr, |
219 | 'A' + (dbtx == DMA_DBTB), sg->len); |
220 | } |
221 | |
222 | static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, |
223 | struct sa11x0_dma_chan *c) |
224 | { |
225 | struct sa11x0_dma_desc *txd = p->txd_done; |
226 | |
227 | if (++p->sg_done == txd->sglen) { |
228 | if (!txd->cyclic) { |
			vchan_cookie_complete(&txd->vd);
230 | |
231 | p->sg_done = 0; |
232 | p->txd_done = p->txd_load; |
233 | |
234 | if (!p->txd_done) |
				tasklet_schedule(&p->dev->task);
236 | } else { |
237 | if ((p->sg_done % txd->period) == 0) |
				vchan_cyclic_callback(&txd->vd);
239 | |
240 | /* Cyclic: reset back to beginning */ |
241 | p->sg_done = 0; |
242 | } |
243 | } |
244 | |
245 | sa11x0_dma_start_sg(p, c); |
246 | } |
247 | |
248 | static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) |
249 | { |
250 | struct sa11x0_dma_phy *p = dev_id; |
251 | struct sa11x0_dma_dev *d = p->dev; |
252 | struct sa11x0_dma_chan *c; |
253 | u32 dcsr; |
254 | |
255 | dcsr = readl_relaxed(p->base + DMA_DCSR_R); |
256 | if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB))) |
257 | return IRQ_NONE; |
258 | |
259 | /* Clear reported status bits */ |
260 | writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB), |
261 | p->base + DMA_DCSR_C); |
262 | |
	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
264 | |
265 | if (dcsr & DCSR_ERROR) { |
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
267 | p->num, dcsr, |
268 | readl_relaxed(p->base + DMA_DDAR), |
269 | readl_relaxed(p->base + DMA_DBSA), |
270 | readl_relaxed(p->base + DMA_DBTA), |
271 | readl_relaxed(p->base + DMA_DBSB), |
272 | readl_relaxed(p->base + DMA_DBTB)); |
273 | } |
274 | |
275 | c = p->vchan; |
276 | if (c) { |
277 | unsigned long flags; |
278 | |
279 | spin_lock_irqsave(&c->vc.lock, flags); |
280 | /* |
281 | * Now that we're holding the lock, check that the vchan |
282 | * really is associated with this pchan before touching the |
283 | * hardware. This should always succeed, because we won't |
284 | * change p->vchan or c->phy while the channel is actively |
285 | * transferring. |
286 | */ |
287 | if (c->phy == p) { |
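			/* Each DONE flag retires one completed sg buffer */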
288 | if (dcsr & DCSR_DONEA) |
289 | sa11x0_dma_complete(p, c); |
290 | if (dcsr & DCSR_DONEB) |
291 | sa11x0_dma_complete(p, c); |
292 | } |
		spin_unlock_irqrestore(&c->vc.lock, flags);
294 | } |
295 | |
296 | return IRQ_HANDLED; |
297 | } |
298 | |
299 | static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c) |
300 | { |
301 | struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c); |
302 | |
303 | /* If the issued list is empty, we have no further txds to process */ |
304 | if (txd) { |
305 | struct sa11x0_dma_phy *p = c->phy; |
306 | |
307 | sa11x0_dma_start_desc(p, txd); |
308 | p->txd_done = txd; |
309 | p->sg_done = 0; |
310 | |
311 | /* The channel should not have any transfers started */ |
312 | WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) & |
313 | (DCSR_STRTA | DCSR_STRTB)); |
314 | |
315 | /* Clear the run and start bits before changing DDAR */ |
316 | writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB, |
317 | p->base + DMA_DCSR_C); |
318 | writel_relaxed(txd->ddar, p->base + DMA_DDAR); |
319 | |
320 | /* Try to start both buffers */ |
321 | sa11x0_dma_start_sg(p, c); |
322 | sa11x0_dma_start_sg(p, c); |
323 | } |
324 | } |
325 | |
326 | static void sa11x0_dma_tasklet(struct tasklet_struct *t) |
327 | { |
328 | struct sa11x0_dma_dev *d = from_tasklet(d, t, task); |
329 | struct sa11x0_dma_phy *p; |
330 | struct sa11x0_dma_chan *c; |
331 | unsigned pch, pch_alloc = 0; |
332 | |
	dev_dbg(d->slave.dev, "tasklet enter\n");
334 | |
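	/* First pass: release pchans whose vchan has run out of work */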
335 | list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { |
		spin_lock_irq(&c->vc.lock);
337 | p = c->phy; |
338 | if (p && !p->txd_done) { |
339 | sa11x0_dma_start_txd(c); |
340 | if (!p->txd_done) { |
341 | /* No current txd associated with this channel */ |
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
343 | |
344 | /* Mark this channel free */ |
345 | c->phy = NULL; |
346 | p->vchan = NULL; |
347 | } |
348 | } |
		spin_unlock_irq(&c->vc.lock);
350 | } |
351 | |
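	/* Second pass: hand free pchans to channels on the pending list */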
	spin_lock_irq(&d->lock);
353 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { |
354 | p = &d->phy[pch]; |
355 | |
		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
357 | c = list_first_entry(&d->chan_pending, |
358 | struct sa11x0_dma_chan, node); |
			list_del_init(&c->node);
360 | |
361 | pch_alloc |= 1 << pch; |
362 | |
363 | /* Mark this channel allocated */ |
364 | p->vchan = c; |
365 | |
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
367 | } |
368 | } |
	spin_unlock_irq(&d->lock);
370 | |
371 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { |
372 | if (pch_alloc & (1 << pch)) { |
373 | p = &d->phy[pch]; |
374 | c = p->vchan; |
375 | |
			spin_lock_irq(&c->vc.lock);
377 | c->phy = p; |
378 | |
379 | sa11x0_dma_start_txd(c); |
			spin_unlock_irq(&c->vc.lock);
381 | } |
382 | } |
383 | |
	dev_dbg(d->slave.dev, "tasklet exit\n");
385 | } |
386 | |
387 | |
388 | static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) |
389 | { |
390 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
392 | unsigned long flags; |
393 | |
394 | spin_lock_irqsave(&d->lock, flags); |
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);
397 | |
	vchan_free_chan_resources(&c->vc);
399 | } |
400 | |
401 | static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) |
402 | { |
403 | unsigned reg; |
404 | u32 dcsr; |
405 | |
406 | dcsr = readl_relaxed(p->base + DMA_DCSR_R); |
407 | |
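	/*
	 * Work out which buffer the hardware is fetching from: buffer A
	 * when A is started and BIU is clear, or when BIU is set but B
	 * has not been started; otherwise buffer B.
	 */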
408 | if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA || |
409 | (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU) |
410 | reg = DMA_DBSA; |
411 | else |
412 | reg = DMA_DBSB; |
413 | |
414 | return readl_relaxed(p->base + reg); |
415 | } |
416 | |
417 | static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, |
418 | dma_cookie_t cookie, struct dma_tx_state *state) |
419 | { |
420 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
422 | struct sa11x0_dma_phy *p; |
423 | struct virt_dma_desc *vd; |
424 | unsigned long flags; |
425 | enum dma_status ret; |
426 | |
	ret = dma_cookie_status(&c->vc.chan, cookie, state);
428 | if (ret == DMA_COMPLETE) |
429 | return ret; |
430 | |
431 | if (!state) |
432 | return c->status; |
433 | |
434 | spin_lock_irqsave(&c->vc.lock, flags); |
435 | p = c->phy; |
436 | |
437 | /* |
438 | * If the cookie is on our issue queue, then the residue is |
439 | * its total size. |
440 | */ |
441 | vd = vchan_find_desc(&c->vc, cookie); |
442 | if (vd) { |
443 | state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; |
444 | } else if (!p) { |
445 | state->residue = 0; |
446 | } else { |
447 | struct sa11x0_dma_desc *txd; |
448 | size_t bytes = 0; |
449 | |
450 | if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) |
451 | txd = p->txd_done; |
452 | else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) |
453 | txd = p->txd_load; |
454 | else |
455 | txd = NULL; |
456 | |
457 | ret = c->status; |
458 | if (txd) { |
459 | dma_addr_t addr = sa11x0_dma_pos(p); |
460 | unsigned i; |
461 | |
			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);
463 | |
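			/*
			 * Walk the sg list: the entry containing the current
			 * hardware position contributes its untransferred
			 * tail, and every later entry contributes in full.
			 */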
464 | for (i = 0; i < txd->sglen; i++) { |
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
466 | i, txd->sg[i].addr, txd->sg[i].len); |
467 | if (addr >= txd->sg[i].addr && |
468 | addr < txd->sg[i].addr + txd->sg[i].len) { |
469 | unsigned len; |
470 | |
471 | len = txd->sg[i].len - |
472 | (addr - txd->sg[i].addr); |
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
474 | i, len); |
475 | bytes += len; |
476 | i++; |
477 | break; |
478 | } |
479 | } |
480 | for (; i < txd->sglen; i++) { |
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
482 | i, txd->sg[i].addr, txd->sg[i].len); |
483 | bytes += txd->sg[i].len; |
484 | } |
485 | } |
486 | state->residue = bytes; |
487 | } |
	spin_unlock_irqrestore(&c->vc.lock, flags);
489 | |
	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);
491 | |
492 | return ret; |
493 | } |
494 | |
495 | /* |
496 | * Move pending txds to the issued list, and re-init pending list. |
497 | * If not already pending, add this channel to the list of pending |
498 | * channels and trigger the tasklet to run. |
499 | */ |
500 | static void sa11x0_dma_issue_pending(struct dma_chan *chan) |
501 | { |
502 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
504 | unsigned long flags; |
505 | |
506 | spin_lock_irqsave(&c->vc.lock, flags); |
	if (vchan_issue_pending(&c->vc)) {
508 | if (!c->phy) { |
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
514 | } |
			spin_unlock(&d->lock);
516 | } |
517 | } else |
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
520 | } |
521 | |
522 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( |
523 | struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen, |
524 | enum dma_transfer_direction dir, unsigned long flags, void *context) |
525 | { |
526 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
527 | struct sa11x0_dma_desc *txd; |
528 | struct scatterlist *sgent; |
529 | unsigned i, j = sglen; |
530 | size_t size = 0; |
531 | |
532 | /* SA11x0 channels can only operate in their native direction */ |
533 | if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { |
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
535 | &c->vc, c->ddar, dir); |
536 | return NULL; |
537 | } |
538 | |
539 | /* Do not allow zero-sized txds */ |
540 | if (sglen == 0) |
541 | return NULL; |
542 | |
543 | for_each_sg(sg, sgent, sglen, i) { |
544 | dma_addr_t addr = sg_dma_address(sgent); |
545 | unsigned int len = sg_dma_len(sgent); |
546 | |
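		/*
		 * Entries larger than DMA_MAX_SIZE are split into several
		 * chunks; account for the extra sg slots so the allocation
		 * below is big enough.
		 */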
547 | if (len > DMA_MAX_SIZE) |
548 | j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; |
549 | if (addr & DMA_ALIGN) { |
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
551 | &c->vc, &addr); |
552 | return NULL; |
553 | } |
554 | } |
555 | |
556 | txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC); |
557 | if (!txd) { |
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
559 | return NULL; |
560 | } |
561 | txd->sglen = j; |
562 | |
563 | j = 0; |
564 | for_each_sg(sg, sgent, sglen, i) { |
565 | dma_addr_t addr = sg_dma_address(sgent); |
566 | unsigned len = sg_dma_len(sgent); |
567 | |
568 | size += len; |
569 | |
570 | do { |
571 | unsigned tlen = len; |
572 | |
573 | /* |
574 | * Check whether the transfer will fit. If not, try |
575 | * to split the transfer up such that we end up with |
576 | * equal chunks - but make sure that we preserve the |
577 | * alignment. This avoids small segments. |
578 | */ |
579 | if (tlen > DMA_MAX_SIZE) { |
580 | unsigned mult = DIV_ROUND_UP(tlen, |
581 | DMA_MAX_SIZE & ~DMA_ALIGN); |
582 | |
583 | tlen = (tlen / mult) & ~DMA_ALIGN; |
584 | } |
585 | |
586 | txd->sg[j].addr = addr; |
587 | txd->sg[j].len = tlen; |
588 | |
589 | addr += tlen; |
590 | len -= tlen; |
591 | j++; |
592 | } while (len); |
593 | } |
594 | |
595 | txd->ddar = c->ddar; |
596 | txd->size = size; |
597 | |
	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
599 | &c->vc, &txd->vd, txd->size, txd->sglen); |
600 | |
	return vchan_tx_prep(&c->vc, &txd->vd, flags);
602 | } |
603 | |
604 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( |
605 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, |
606 | enum dma_transfer_direction dir, unsigned long flags) |
607 | { |
608 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
609 | struct sa11x0_dma_desc *txd; |
610 | unsigned i, j, k, sglen, sgperiod; |
611 | |
612 | /* SA11x0 channels can only operate in their native direction */ |
613 | if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { |
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
615 | &c->vc, c->ddar, dir); |
616 | return NULL; |
617 | } |
618 | |
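	/*
	 * Each period is split into sgperiod chunks of no more than
	 * DMA_MAX_SIZE bytes; sglen covers all size / period periods.
	 */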
619 | sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); |
620 | sglen = size * sgperiod / period; |
621 | |
622 | /* Do not allow zero-sized txds */ |
623 | if (sglen == 0) |
624 | return NULL; |
625 | |
626 | txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC); |
627 | if (!txd) { |
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
629 | return NULL; |
630 | } |
631 | txd->sglen = sglen; |
632 | |
633 | for (i = k = 0; i < size / period; i++) { |
634 | size_t tlen, len = period; |
635 | |
636 | for (j = 0; j < sgperiod; j++, k++) { |
637 | tlen = len; |
638 | |
639 | if (tlen > DMA_MAX_SIZE) { |
640 | unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); |
641 | tlen = (tlen / mult) & ~DMA_ALIGN; |
642 | } |
643 | |
644 | txd->sg[k].addr = addr; |
645 | txd->sg[k].len = tlen; |
646 | addr += tlen; |
647 | len -= tlen; |
648 | } |
649 | |
650 | WARN_ON(len != 0); |
651 | } |
652 | |
653 | WARN_ON(k != sglen); |
654 | |
655 | txd->ddar = c->ddar; |
656 | txd->size = size; |
657 | txd->cyclic = 1; |
658 | txd->period = sgperiod; |
659 | |
	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
661 | } |
662 | |
663 | static int sa11x0_dma_device_config(struct dma_chan *chan, |
664 | struct dma_slave_config *cfg) |
665 | { |
666 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
667 | u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); |
668 | dma_addr_t addr; |
669 | enum dma_slave_buswidth width; |
670 | u32 maxburst; |
671 | |
672 | if (ddar & DDAR_RW) { |
673 | addr = cfg->src_addr; |
674 | width = cfg->src_addr_width; |
675 | maxburst = cfg->src_maxburst; |
676 | } else { |
677 | addr = cfg->dst_addr; |
678 | width = cfg->dst_addr_width; |
679 | maxburst = cfg->dst_maxburst; |
680 | } |
681 | |
682 | if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE && |
683 | width != DMA_SLAVE_BUSWIDTH_2_BYTES) || |
684 | (maxburst != 4 && maxburst != 8)) |
685 | return -EINVAL; |
686 | |
687 | if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) |
688 | ddar |= DDAR_DW; |
689 | if (maxburst == 8) |
690 | ddar |= DDAR_BS; |
691 | |
	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
693 | &c->vc, &addr, width, maxburst); |
694 | |
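	/*
	 * Pack the device address into DDAR: the top nibble stays in
	 * bits 31:28 and word-address bits 21:2 are shifted up into
	 * bits 27:8, alongside the width, burst and direction bits
	 * already set above.
	 */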
695 | c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; |
696 | |
697 | return 0; |
698 | } |
699 | |
700 | static int sa11x0_dma_device_pause(struct dma_chan *chan) |
701 | { |
702 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
704 | struct sa11x0_dma_phy *p; |
705 | unsigned long flags; |
706 | |
	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
708 | spin_lock_irqsave(&c->vc.lock, flags); |
709 | if (c->status == DMA_IN_PROGRESS) { |
710 | c->status = DMA_PAUSED; |
711 | |
712 | p = c->phy; |
713 | if (p) { |
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
715 | } else { |
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
719 | } |
720 | } |
	spin_unlock_irqrestore(&c->vc.lock, flags);
722 | |
723 | return 0; |
724 | } |
725 | |
726 | static int sa11x0_dma_device_resume(struct dma_chan *chan) |
727 | { |
728 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
730 | struct sa11x0_dma_phy *p; |
731 | unsigned long flags; |
732 | |
	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
734 | spin_lock_irqsave(&c->vc.lock, flags); |
735 | if (c->status == DMA_PAUSED) { |
736 | c->status = DMA_IN_PROGRESS; |
737 | |
738 | p = c->phy; |
739 | if (p) { |
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
745 | } |
746 | } |
	spin_unlock_irqrestore(&c->vc.lock, flags);
748 | |
749 | return 0; |
750 | } |
751 | |
752 | static int sa11x0_dma_device_terminate_all(struct dma_chan *chan) |
753 | { |
754 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
756 | struct sa11x0_dma_phy *p; |
757 | LIST_HEAD(head); |
758 | unsigned long flags; |
759 | |
	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
761 | /* Clear the tx descriptor lists */ |
762 | spin_lock_irqsave(&c->vc.lock, flags); |
	vchan_get_all_descriptors(&c->vc, &head);
764 | |
765 | p = c->phy; |
766 | if (p) { |
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
768 | /* vchan is assigned to a pchan - stop the channel */ |
769 | writel(DCSR_RUN | DCSR_IE | |
770 | DCSR_STRTA | DCSR_DONEA | |
771 | DCSR_STRTB | DCSR_DONEB, |
		       p->base + DMA_DCSR_C);
773 | |
774 | if (p->txd_load) { |
775 | if (p->txd_load != p->txd_done) |
				list_add_tail(&p->txd_load->vd.node, &head);
777 | p->txd_load = NULL; |
778 | } |
779 | if (p->txd_done) { |
			list_add_tail(&p->txd_done->vd.node, &head);
781 | p->txd_done = NULL; |
782 | } |
783 | c->phy = NULL; |
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
788 | } |
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);
791 | |
792 | return 0; |
793 | } |
794 | |
795 | struct sa11x0_dma_channel_desc { |
796 | u32 ddar; |
797 | const char *name; |
798 | }; |
799 | |
800 | #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 } |
801 | static const struct sa11x0_dma_channel_desc chan_desc[] = { |
802 | CD(Ser0UDCTr, 0), |
803 | CD(Ser0UDCRc, DDAR_RW), |
804 | CD(Ser1SDLCTr, 0), |
805 | CD(Ser1SDLCRc, DDAR_RW), |
806 | CD(Ser1UARTTr, 0), |
807 | CD(Ser1UARTRc, DDAR_RW), |
808 | CD(Ser2ICPTr, 0), |
809 | CD(Ser2ICPRc, DDAR_RW), |
810 | CD(Ser3UARTTr, 0), |
811 | CD(Ser3UARTRc, DDAR_RW), |
812 | CD(Ser4MCP0Tr, 0), |
813 | CD(Ser4MCP0Rc, DDAR_RW), |
814 | CD(Ser4MCP1Tr, 0), |
815 | CD(Ser4MCP1Rc, DDAR_RW), |
816 | CD(Ser4SSPTr, 0), |
817 | CD(Ser4SSPRc, DDAR_RW), |
818 | }; |
819 | |
static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
825 | }; |
826 | |
827 | static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) |
828 | { |
829 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
830 | const char *p = param; |
831 | |
832 | return !strcmp(c->name, p); |
833 | } |
834 | |
835 | static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, |
836 | struct device *dev) |
837 | { |
838 | unsigned i; |
839 | |
	INIT_LIST_HEAD(&dmadev->channels);
841 | dmadev->dev = dev; |
842 | dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; |
843 | dmadev->device_config = sa11x0_dma_device_config; |
844 | dmadev->device_pause = sa11x0_dma_device_pause; |
845 | dmadev->device_resume = sa11x0_dma_device_resume; |
846 | dmadev->device_terminate_all = sa11x0_dma_device_terminate_all; |
847 | dmadev->device_tx_status = sa11x0_dma_tx_status; |
848 | dmadev->device_issue_pending = sa11x0_dma_issue_pending; |
849 | |
850 | for (i = 0; i < ARRAY_SIZE(chan_desc); i++) { |
851 | struct sa11x0_dma_chan *c; |
852 | |
		c = kzalloc(sizeof(*c), GFP_KERNEL);
854 | if (!c) { |
			dev_err(dev, "no memory for channel %u\n", i);
856 | return -ENOMEM; |
857 | } |
858 | |
859 | c->status = DMA_IN_PROGRESS; |
860 | c->ddar = chan_desc[i].ddar; |
861 | c->name = chan_desc[i].name; |
		INIT_LIST_HEAD(&c->node);
863 | |
864 | c->vc.desc_free = sa11x0_dma_free_desc; |
		vchan_init(&c->vc, dmadev);
866 | } |
867 | |
	return dma_async_device_register(dmadev);
869 | } |
870 | |
871 | static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr, |
872 | void *data) |
873 | { |
874 | int irq = platform_get_irq(pdev, nr); |
875 | |
876 | if (irq <= 0) |
877 | return -ENXIO; |
878 | |
	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
880 | } |
881 | |
882 | static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr, |
883 | void *data) |
884 | { |
885 | int irq = platform_get_irq(pdev, nr); |
886 | if (irq > 0) |
887 | free_irq(irq, data); |
888 | } |
889 | |
890 | static void sa11x0_dma_free_channels(struct dma_device *dmadev) |
891 | { |
892 | struct sa11x0_dma_chan *c, *cn; |
893 | |
894 | list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { |
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
898 | } |
899 | } |
900 | |
901 | static int sa11x0_dma_probe(struct platform_device *pdev) |
902 | { |
903 | struct sa11x0_dma_dev *d; |
904 | struct resource *res; |
905 | unsigned i; |
906 | int ret; |
907 | |
908 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
909 | if (!res) |
910 | return -ENXIO; |
911 | |
	d = kzalloc(sizeof(*d), GFP_KERNEL);
913 | if (!d) { |
914 | ret = -ENOMEM; |
915 | goto err_alloc; |
916 | } |
917 | |
918 | spin_lock_init(&d->lock); |
	INIT_LIST_HEAD(&d->chan_pending);
920 | |
921 | d->slave.filter.fn = sa11x0_dma_filter_fn; |
922 | d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map); |
923 | d->slave.filter.map = sa11x0_dma_map; |
924 | |
	d->base = ioremap(res->start, resource_size(res));
926 | if (!d->base) { |
927 | ret = -ENOMEM; |
928 | goto err_ioremap; |
929 | } |
930 | |
	tasklet_setup(&d->task, sa11x0_dma_tasklet);
932 | |
933 | for (i = 0; i < NR_PHY_CHAN; i++) { |
934 | struct sa11x0_dma_phy *p = &d->phy[i]; |
935 | |
936 | p->dev = d; |
937 | p->num = i; |
938 | p->base = d->base + i * DMA_SIZE; |
939 | writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR | |
940 | DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB, |
941 | p->base + DMA_DCSR_C); |
942 | writel_relaxed(0, p->base + DMA_DDAR); |
943 | |
		ret = sa11x0_dma_request_irq(pdev, i, p);
945 | if (ret) { |
946 | while (i) { |
947 | i--; |
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
949 | } |
950 | goto err_irq; |
951 | } |
952 | } |
953 | |
954 | dma_cap_set(DMA_SLAVE, d->slave.cap_mask); |
955 | dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); |
956 | d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; |
957 | d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; |
958 | d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
959 | d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
960 | d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
961 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES); |
962 | d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
963 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES); |
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
965 | if (ret) { |
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
967 | ret); |
968 | goto err_slave_reg; |
969 | } |
970 | |
	platform_set_drvdata(pdev, d);
972 | return 0; |
973 | |
974 | err_slave_reg: |
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
978 | err_irq: |
	tasklet_kill(&d->task);
	iounmap(d->base);
981 | err_ioremap: |
	kfree(d);
983 | err_alloc: |
984 | return ret; |
985 | } |
986 | |
987 | static void sa11x0_dma_remove(struct platform_device *pdev) |
988 | { |
989 | struct sa11x0_dma_dev *d = platform_get_drvdata(pdev); |
990 | unsigned pch; |
991 | |
	dma_async_device_unregister(&d->slave);
993 | |
	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);
1000 | } |
1001 | |
1002 | static __maybe_unused int sa11x0_dma_suspend(struct device *dev) |
1003 | { |
1004 | struct sa11x0_dma_dev *d = dev_get_drvdata(dev); |
1005 | unsigned pch; |
1006 | |
1007 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { |
1008 | struct sa11x0_dma_phy *p = &d->phy[pch]; |
1009 | u32 dcsr, saved_dcsr; |
1010 | |
1011 | dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R); |
1012 | if (dcsr & DCSR_RUN) { |
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
1014 | dcsr = readl_relaxed(p->base + DMA_DCSR_R); |
1015 | } |
1016 | |
1017 | saved_dcsr &= DCSR_RUN | DCSR_IE; |
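		/*
		 * Save the currently active buffer as the first pair
		 * (dbs/dbt[0]) and swap the STRT bits to match, so that
		 * resume can reload buffer A first regardless of which
		 * buffer was in use at suspend time.
		 */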
1018 | if (dcsr & DCSR_BIU) { |
1019 | p->dbs[0] = readl_relaxed(p->base + DMA_DBSB); |
1020 | p->dbt[0] = readl_relaxed(p->base + DMA_DBTB); |
1021 | p->dbs[1] = readl_relaxed(p->base + DMA_DBSA); |
1022 | p->dbt[1] = readl_relaxed(p->base + DMA_DBTA); |
1023 | saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) | |
1024 | (dcsr & DCSR_STRTB ? DCSR_STRTA : 0); |
1025 | } else { |
1026 | p->dbs[0] = readl_relaxed(p->base + DMA_DBSA); |
1027 | p->dbt[0] = readl_relaxed(p->base + DMA_DBTA); |
1028 | p->dbs[1] = readl_relaxed(p->base + DMA_DBSB); |
1029 | p->dbt[1] = readl_relaxed(p->base + DMA_DBTB); |
1030 | saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB); |
1031 | } |
1032 | p->dcsr = saved_dcsr; |
1033 | |
		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
1035 | } |
1036 | |
1037 | return 0; |
1038 | } |
1039 | |
1040 | static __maybe_unused int sa11x0_dma_resume(struct device *dev) |
1041 | { |
1042 | struct sa11x0_dma_dev *d = dev_get_drvdata(dev); |
1043 | unsigned pch; |
1044 | |
1045 | for (pch = 0; pch < NR_PHY_CHAN; pch++) { |
1046 | struct sa11x0_dma_phy *p = &d->phy[pch]; |
1047 | struct sa11x0_dma_desc *txd = NULL; |
1048 | u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R); |
1049 | |
1050 | WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN)); |
1051 | |
1052 | if (p->txd_done) |
1053 | txd = p->txd_done; |
1054 | else if (p->txd_load) |
1055 | txd = p->txd_load; |
1056 | |
1057 | if (!txd) |
1058 | continue; |
1059 | |
1060 | writel_relaxed(txd->ddar, p->base + DMA_DDAR); |
1061 | |
1062 | writel_relaxed(p->dbs[0], p->base + DMA_DBSA); |
1063 | writel_relaxed(p->dbt[0], p->base + DMA_DBTA); |
1064 | writel_relaxed(p->dbs[1], p->base + DMA_DBSB); |
1065 | writel_relaxed(p->dbt[1], p->base + DMA_DBTB); |
1066 | writel_relaxed(p->dcsr, p->base + DMA_DCSR_S); |
1067 | } |
1068 | |
1069 | return 0; |
1070 | } |
1071 | |
1072 | static const struct dev_pm_ops sa11x0_dma_pm_ops = { |
1073 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sa11x0_dma_suspend, sa11x0_dma_resume) |
1074 | }; |
1075 | |
1076 | static struct platform_driver sa11x0_dma_driver = { |
1077 | .driver = { |
		.name	= "sa11x0-dma",
1079 | .pm = &sa11x0_dma_pm_ops, |
1080 | }, |
1081 | .probe = sa11x0_dma_probe, |
1082 | .remove_new = sa11x0_dma_remove, |
1083 | }; |
1084 | |
1085 | static int __init sa11x0_dma_init(void) |
1086 | { |
1087 | return platform_driver_register(&sa11x0_dma_driver); |
1088 | } |
1089 | subsys_initcall(sa11x0_dma_init); |
1090 | |
1091 | static void __exit sa11x0_dma_exit(void) |
1092 | { |
1093 | platform_driver_unregister(&sa11x0_dma_driver); |
1094 | } |
1095 | module_exit(sa11x0_dma_exit); |
1096 | |
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");
1101 | |