// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

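/*
 * Channel register accessors.  The controller comes in two flavours: a
 * 64-bit register layout (struct txx9dmac_cregs) and a 32-bit layout
 * (struct txx9dmac_cregs32).  The channel_*() wrappers below dispatch
 * on is_dmac64() so the rest of the driver can stay layout-agnostic.
 */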
#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

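/*
 * On a 32-bit kernel without 64-bit physical addresses the 64-bit CHAR
 * register cannot be cleared with a single writeq, so it is zeroed as
 * two 32-bit writes (the register proper plus its __pad_CHAR half).
 */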
static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

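/*
 * Largest transfer count (CNTR) programmed into a single hardware
 * descriptor; prep_dma_memcpy() splits longer requests into chunks of
 * at most this size.
 */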
#define TXX9_DMA_MAX_COUNT	0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

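/*
 * Allocate a software descriptor and DMA-map the embedded hardware
 * descriptor so the controller can fetch it from memory.
 */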
static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}

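/*
 * Grab a descriptor off the free list, skipping any that the client has
 * not yet ACKed; fall back to a fresh GFP_ATOMIC allocation if none is
 * reusable.
 */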
static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

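/*
 * Hand the descriptor chain back to the CPU before the driver touches
 * it again; pairs with the dma_sync_single_for_device() calls made when
 * the chain was handed to the hardware.
 */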
static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
					child->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
				desc->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

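/*
 * Put the channel back into a known-idle state: pulse CHRST, then zero
 * every channel register.
 */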
static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMACs support SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non-zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non-zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

/*----------------------------------------------------------------------*/

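/*
 * Retire one descriptor: record its cookie as complete, recycle the
 * chain onto the free list, and invoke the client callback outside of
 * any submission path.
 */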
static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dma_cookie_complete(txd);
	dmaengine_desc_get_callback(txd, &cb);

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
	dma_run_dependencies(txd);
}

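/*
 * Link descriptors from dc->queue into a hardware chain on `list',
 * writing each predecessor's CHAR to point at its successor.  Stops
 * early at a descriptor that wants a completion interrupt when the
 * channel cannot raise chain-completion interrupts (INTENT not set).
 */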
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
						   prev->txd.phys,
						   ddev->descsize,
						   DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}

static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}

static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}

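/*
 * Walk the active list against the current CHAR value to find out how
 * far the hardware has progressed, completing everything before the
 * in-flight descriptor.  Also dispatches error handling on ABCHC.
 */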
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}

static void txx9dmac_chan_tasklet(struct tasklet_struct *t)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = from_tasklet(dc, t, tasklet);
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

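/*
 * Device-level tasklet for controllers whose channels share a single
 * IRQ: the MCR status bits identify the interrupting channels, and each
 * one gets its descriptors scanned in turn.
 */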
static void txx9dmac_tasklet(struct tasklet_struct *t)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = from_tasklet(ddev, t, tasklet);
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

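/*
 * dmaengine tx_submit hook: assign a cookie and park the descriptor on
 * dc->queue.  Nothing is written to the hardware until
 * txx9dmac_issue_pending() moves it to the active list.
 */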
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit. If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}

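/*
 * Build a slave (device <-> memory) chain: one hardware descriptor per
 * scatterlist entry, with the fixed FIFO register on one side and the
 * incrementing memory address on the other.
 */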
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_MEM_TO_DEV);
	else
		BUG_ON(direction != DMA_DEV_TO_MEM);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_MEM_TO_DEV) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}

static int txx9dmac_terminate_all(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);

	return 0;
}

static enum dma_status
txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return DMA_COMPLETE;

	spin_lock_bh(&dc->lock);
	txx9dmac_scan_descriptors(dc);
	spin_unlock_bh(&dc->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

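/*
 * Append freshly dequeued descriptors to a chain that may still be
 * running: patch the old tail's CHAR, then, if the hardware has already
 * stopped exactly at that tail (CHNEN clear, CHAR pointing at it),
 * restart it by writing the new head's address to CHAR.
 */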
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}

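/*
 * dmaengine issue_pending hook: start the queue on an idle channel, or,
 * on SMPCHN-capable hardware, chain it onto the transfer in flight.
 */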
static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}

static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				 "only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
}

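/*
 * Each DMA channel is its own platform device ("txx9dmac-chan") whose
 * platform data points back at the parent "txx9dmac" controller device.
 */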
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata =
			dev_get_platdata(&pdev->dev);
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_terminate_all = txx9dmac_terminate_all;
	dc->dma.device_tx_status = txx9dmac_tx_status;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_setup(&dc->tasklet, txx9dmac_chan_tasklet);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dma_cookie_init(&dc->chan);

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}

static void txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0) {
		devm_free_irq(&pdev->dev, dc->irq, dc);
		tasklet_kill(&dc->tasklet);
	}
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
}

static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_setup(&ddev->tasklet, txx9dmac_tasklet);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static void txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0) {
		devm_free_irq(&pdev->dev, ddev->irq, ddev);
		tasklet_kill(&ddev->tasklet);
	}
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}

static int txx9dmac_suspend_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);

	txx9dmac_off(ddev);
	return 0;
}

static int txx9dmac_resume_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
	struct txx9dmac_platform_data *pdata = dev_get_platdata(dev);
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}

static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
	.suspend_noirq = txx9dmac_suspend_noirq,
	.resume_noirq = txx9dmac_resume_noirq,
};

static struct platform_driver txx9dmac_chan_driver = {
	.remove_new = txx9dmac_chan_remove,
	.driver = {
		.name = "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove_new = txx9dmac_remove,
	.shutdown = txx9dmac_shutdown,
	.driver = {
		.name = "txx9dmac",
		.pm = &txx9dmac_dev_pm_ops,
	},
};

static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_ALIAS("platform:txx9dmac");
MODULE_ALIAS("platform:txx9dmac-chan");